Commit aafcd5d7 authored by Linus Torvalds

Merge branch 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 relocation changes from Ingo Molnar:
 "This tree contains a single change, ELF relocation handling in C - one
  of the kernel randomization patches that makes sense even without
  randomization present upstream"

* 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, relocs: Move ELF relocation handling to C
parents 6832d965 a0215061
@@ -1716,9 +1716,10 @@ config X86_NEED_RELOCS
 	depends on X86_32 && RELOCATABLE

 config PHYSICAL_ALIGN
-	hex "Alignment value to which kernel should be aligned" if X86_32
+	hex "Alignment value to which kernel should be aligned"
 	default "0x1000000"
-	range 0x2000 0x1000000
+	range 0x2000 0x1000000 if X86_32
+	range 0x200000 0x1000000 if X86_64
 	---help---
 	  This value puts the alignment restrictions on physical address
 	  where kernel is loaded and run from. Kernel is compiled for an
@@ -1736,6 +1737,9 @@ config PHYSICAL_ALIGN
 	  end result is that kernel runs from a physical address meeting
 	  above alignment restrictions.

+	  On 32-bit this value must be a multiple of 0x2000. On 64-bit
+	  this value must be a multiple of 0x200000.
+
 	  Don't change this unless you know what you are doing.

 config HOTPLUG_CPU
......
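A note on the new alignment constraints: the help text above says the kernel ends up running from a physical address that satisfies PHYSICAL_ALIGN, which must now be a multiple of 0x2000 on 32-bit and 0x200000 on 64-bit. The standalone sketch below (not kernel code; the addresses are arbitrary examples) shows the power-of-two round-up this implies.

#include <stdio.h>

/* round addr up to the next multiple of align; align must be a power of two */
static unsigned long long align_up(unsigned long long addr, unsigned long long align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	printf("%#llx\n", align_up(0x1234000ULL, 0x200000ULL)); /* 0x1400000: rounded up to a 2 MiB boundary */
	printf("%#llx\n", align_up(0x1234000ULL, 0x2000ULL));   /* 0x1234000: already 8 KiB aligned */
	return 0;
}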
@@ -16,6 +16,10 @@ endif
 # e.g.: obj-y += foo_$(BITS).o
 export BITS

+ifdef CONFIG_X86_NEED_RELOCS
+        LDFLAGS_vmlinux := --emit-relocs
+endif
+
 ifeq ($(CONFIG_X86_32),y)
         BITS := 32
         UTS_MACHINE := i386
@@ -25,10 +29,6 @@ ifeq ($(CONFIG_X86_32),y)
         KBUILD_AFLAGS += $(biarch)
         KBUILD_CFLAGS += $(biarch)

-        ifdef CONFIG_RELOCATABLE
-                LDFLAGS_vmlinux := --emit-relocs
-        endif
-
         KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return

         # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
......
@@ -181,8 +181,9 @@ relocated:
 /*
  * Do the decompression, and jump to the new kernel..
  */
-	leal	z_extract_offset_negative(%ebx), %ebp
 				/* push arguments for decompress_kernel: */
+	pushl	$z_output_len	/* decompressed length */
+	leal	z_extract_offset_negative(%ebx), %ebp
 	pushl	%ebp		/* output address */
 	pushl	$z_input_len	/* input_len */
 	leal	input_data(%ebx), %eax
@@ -191,33 +192,7 @@ relocated:
 	pushl	%eax		/* heap area */
 	pushl	%esi		/* real mode pointer */
 	call	decompress_kernel
-	addl	$20, %esp
-
-#if CONFIG_RELOCATABLE
-/*
- * Find the address of the relocations.
- */
-	leal	z_output_len(%ebp), %edi
-
-/*
- * Calculate the delta between where vmlinux was compiled to run
- * and where it was actually loaded.
- */
-	movl	%ebp, %ebx
-	subl	$LOAD_PHYSICAL_ADDR, %ebx
-	jz	2f	/* Nothing to be done if loaded at compiled addr. */
-/*
- * Process relocations.
- */
-
-1:	subl	$4, %edi
-	movl	(%edi), %ecx
-	testl	%ecx, %ecx
-	jz	2f
-	addl	%ebx, -__PAGE_OFFSET(%ebx, %ecx)
-	jmp	1b
-2:
-#endif
+	addl	$24, %esp

 /*
  * Jump to the decompressed kernel.
......
@@ -338,6 +338,7 @@ relocated:
 	leaq	input_data(%rip), %rdx	/* input_data */
 	movl	$z_input_len, %ecx	/* input_len */
 	movq	%rbp, %r8		/* output target address */
+	movq	$z_output_len, %r9	/* decompressed length */
 	call	decompress_kernel
 	popq	%rsi

......
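For reference, here is how the two assembly hunks above line up with the C entry point they call (the prototype itself appears in the misc.c hunk below). On 32-bit all six arguments go on the stack, pushed right to left, and the caller pops them afterwards, so the cleanup grows from addl $20 to addl $24 (6 arguments x 4 bytes). On 64-bit the SysV AMD64 convention passes the sixth integer argument in %r9, hence the added movq $z_output_len, %r9. The stand-in definitions below only keep the sketch self-contained; they are not the kernel's.

#define asmlinkage		/* stand-in; in the kernel this forces regparm(0) on 32-bit */
typedef unsigned long memptr;	/* simplified stand-in for the boot code's memptr type */

				/* stack slot on entry (32-bit) / register (64-bit) */
asmlinkage void decompress_kernel(void *rmode,			/*  4(%esp)  /  %rdi */
				  memptr heap,			/*  8(%esp)  /  %rsi */
				  unsigned char *input_data,	/* 12(%esp)  /  %rdx */
				  unsigned long input_len,	/* 16(%esp)  /  %rcx */
				  unsigned char *output,	/* 20(%esp)  /  %r8  */
				  unsigned long output_len);	/* 24(%esp)  /  %r9  */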
@@ -271,6 +271,79 @@ static void error(char *x)
 	asm("hlt");
 }

+#if CONFIG_X86_NEED_RELOCS
+static void handle_relocations(void *output, unsigned long output_len)
+{
+	int *reloc;
+	unsigned long delta, map, ptr;
+	unsigned long min_addr = (unsigned long)output;
+	unsigned long max_addr = min_addr + output_len;
+
+	/*
+	 * Calculate the delta between where vmlinux was linked to load
+	 * and where it was actually loaded.
+	 */
+	delta = min_addr - LOAD_PHYSICAL_ADDR;
+	if (!delta) {
+		debug_putstr("No relocation needed... ");
+		return;
+	}
+	debug_putstr("Performing relocations... ");
+
+	/*
+	 * The kernel contains a table of relocation addresses. Those
+	 * addresses have the final load address of the kernel in virtual
+	 * memory. We are currently working in the self map. So we need to
+	 * create an adjustment for kernel memory addresses to the self map.
+	 * This will involve subtracting out the base address of the kernel.
+	 */
+	map = delta - __START_KERNEL_map;
+
+	/*
+	 * Process relocations: 32 bit relocations first then 64 bit after.
+	 * Two sets of binary relocations are added to the end of the kernel
+	 * before compression. Each relocation table entry is the kernel
+	 * address of the location which needs to be updated stored as a
+	 * 32-bit value which is sign extended to 64 bits.
+	 *
+	 * Format is:
+	 *
+	 * kernel bits...
+	 * 0 - zero terminator for 64 bit relocations
+	 * 64 bit relocation repeated
+	 * 0 - zero terminator for 32 bit relocations
+	 * 32 bit relocation repeated
+	 *
+	 * So we work backwards from the end of the decompressed image.
+	 */
+	for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) {
+		int extended = *reloc;
+		extended += map;
+
+		ptr = (unsigned long)extended;
+		if (ptr < min_addr || ptr > max_addr)
+			error("32-bit relocation outside of kernel!\n");
+
+		*(uint32_t *)ptr += delta;
+	}
+#ifdef CONFIG_X86_64
+	for (reloc--; *reloc; reloc--) {
+		long extended = *reloc;
+		extended += map;
+
+		ptr = (unsigned long)extended;
+		if (ptr < min_addr || ptr > max_addr)
+			error("64-bit relocation outside of kernel!\n");
+
+		*(uint64_t *)ptr += delta;
+	}
+#endif
+}
+#else
+static inline void handle_relocations(void *output, unsigned long output_len)
+{ }
+#endif
+
 static void parse_elf(void *output)
 {
 #ifdef CONFIG_X86_64
@@ -325,7 +398,8 @@ static void parse_elf(void *output)
 asmlinkage void decompress_kernel(void *rmode, memptr heap,
 				  unsigned char *input_data,
 				  unsigned long input_len,
-				  unsigned char *output)
+				  unsigned char *output,
+				  unsigned long output_len)
 {
 	real_mode = rmode;

@@ -365,6 +439,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
 	debug_putstr("\nDecompressing Linux... ");
 	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
 	parse_elf(output);
+	handle_relocations(output, output_len);
 	debug_putstr("done.\nBooting the kernel.\n");
 	return;
 }
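The comment block in handle_relocations() above describes the table layout; the loop is easier to see in isolation. Below is a small userspace model of the same backwards walk over a hand-built table. Everything in it is invented for illustration (buffer size, addresses, a single 32-bit entry), the table entries are plain byte offsets rather than kernel virtual addresses, and the __START_KERNEL_map adjustment is left out; this is not the kernel code.

#include <stdint.h>
#include <stdio.h>

#define LINK_ADDR 0x100000UL	/* address the fake image was "linked" for */
#define LOAD_ADDR 0x180000UL	/* address it ended up being "loaded" at   */

int main(void)
{
	/*
	 * 44-byte fake image: 32 bytes of "kernel" followed by the appended
	 * table.  Forward layout matches the comment in handle_relocations():
	 * 64-bit terminator, (no 64-bit entries), 32-bit terminator, then the
	 * 32-bit entries at the very end of the image.
	 */
	int32_t image[11] = { 0 };
	uint32_t delta = LOAD_ADDR - LINK_ADDR;
	int32_t *reloc;

	image[2]  = LINK_ADDR + 0x10;	/* an absolute pointer stored at byte offset 8 */
	image[8]  = 0;			/* zero terminator for 64-bit relocations */
	image[9]  = 0;			/* zero terminator for 32-bit relocations */
	image[10] = 8;			/* one 32-bit relocation: patch byte offset 8 */

	/* walk backwards from the last word until the zero terminator */
	for (reloc = &image[10]; *reloc; reloc--) {
		uint32_t *ptr = (uint32_t *)((unsigned char *)image + *reloc);
		*ptr += delta;		/* same "+= delta" fixup as the kernel loop */
	}

	printf("patched pointer: %#x (was %#lx)\n",
	       (unsigned int)image[2], LINK_ADDR + 0x10);
	return 0;
}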
@@ -15,6 +15,8 @@
  */
 #define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)

+#define __START_KERNEL_map	__PAGE_OFFSET
+
 #define THREAD_SIZE_ORDER	1
 #define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)

......
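Defining __START_KERNEL_map as __PAGE_OFFSET on 32-bit (the hunk above) is what lets the shared C walker reuse the 64-bit math unchanged: the address it computes reduces to exactly the addressing mode the removed head_32.S loop used. A short derivation, using only names from the hunks above:

/*
 * From handle_relocations(): delta = output - LOAD_PHYSICAL_ADDR and
 * map = delta - __START_KERNEL_map.  With __START_KERNEL_map equal to
 * __PAGE_OFFSET on 32-bit, for a table entry "entry":
 *
 *   ptr = entry + map
 *       = (entry - __PAGE_OFFSET)            linked physical address of the field
 *         - LOAD_PHYSICAL_ADDR + output      rebased into the decompression buffer
 *       = delta + entry - __PAGE_OFFSET
 *
 * which is the effective address the removed assembly formed with
 * "addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)" (%ebx = delta, %ecx = entry),
 * and both versions then add delta to the 32-bit value stored there.
 */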
@@ -32,11 +32,6 @@
  */
 #define __PAGE_OFFSET		_AC(0xffff880000000000, UL)

-#define __PHYSICAL_START	((CONFIG_PHYSICAL_START +		\
-				  (CONFIG_PHYSICAL_ALIGN - 1)) &	\
-				 ~(CONFIG_PHYSICAL_ALIGN - 1))
-
-#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
 #define __START_KERNEL_map	_AC(0xffffffff80000000, UL)

 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
......
@@ -33,6 +33,11 @@
 	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
 	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

+#define __PHYSICAL_START	ALIGN(CONFIG_PHYSICAL_START, \
+				      CONFIG_PHYSICAL_ALIGN)
+
+#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
+
 #ifdef CONFIG_X86_64
 #include <asm/page_64_types.h>
 #else
......
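With __PHYSICAL_START and __START_KERNEL now shared here for both word sizes, the open-coded mask expression removed from the 64-bit header is replaced by the generic ALIGN() helper; the round-up is the same. A small standalone example follows (CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN are example values, and ALIGN() is a simplified stand-in for the kernel macro):

#include <stdio.h>

#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))	/* simplified stand-in */

#define CONFIG_PHYSICAL_START	0x1000000ULL		/* example value */
#define CONFIG_PHYSICAL_ALIGN	0x200000ULL		/* example 64-bit alignment */
#define __START_KERNEL_map	0xffffffff80000000ULL	/* from page_64_types.h above */

#define __PHYSICAL_START	ALIGN(CONFIG_PHYSICAL_START, CONFIG_PHYSICAL_ALIGN)
#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)

int main(void)
{
	printf("__PHYSICAL_START = %#llx\n", (unsigned long long)__PHYSICAL_START); /* 0x1000000 */
	printf("__START_KERNEL   = %#llx\n", (unsigned long long)__START_KERNEL);   /* 0xffffffff81000000 */
	return 0;
}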