Commit 34eb62d8 authored by Linus Torvalds

Merge tag 'core-build-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull orphan section checking from Ingo Molnar:
 "Orphan link sections were a long-standing source of obscure bugs,
  because the heuristics that various linkers & compilers use to handle
  them (include these bits into the output image vs discarding them
  silently) are both highly idiosyncratic and also version dependent.

  Instead of this historically problematic mess, this tree by Kees Cook
  (et al) adds build time asserts and build time warnings if there's any
  orphan section in the kernel or if a section is not sized as expected.

  And because we relied on so many silent assumptions in this area, fix
  a metric ton of dependencies and some outright bugs related to this,
  before we can finally enable the checks on the x86, ARM and ARM64
  platforms"

* tag 'core-build-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  x86/boot/compressed: Warn on orphan section placement
  x86/build: Warn on orphan section placement
  arm/boot: Warn on orphan section placement
  arm/build: Warn on orphan section placement
  arm64/build: Warn on orphan section placement
  x86/boot/compressed: Add missing debugging sections to output
  x86/boot/compressed: Remove, discard, or assert for unwanted sections
  x86/boot/compressed: Reorganize zero-size section asserts
  x86/build: Add asserts for unwanted sections
  x86/build: Enforce an empty .got.plt section
  x86/asm: Avoid generating unused kprobe sections
  arm/boot: Handle all sections explicitly
  arm/build: Assert for unwanted sections
  arm/build: Add missing sections
  arm/build: Explicitly keep .ARM.attributes sections
  arm/build: Refactor linker script headers
  arm64/build: Assert for unwanted sections
  arm64/build: Add missing DWARF sections
  arm64/build: Use common DISCARDS in linker script
  arm64/build: Remove .eh_frame* sections due to unwind tables
  ...
parents e6412f98 6e0bf0e0
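
For illustration only (not part of the commit): a minimal sketch of the class of problem the new flag surfaces. The file name, section name, and linker script name below are hypothetical; the warning format is the kind emitted by binutils ld / LLD under --orphan-handling=warn.

    /* orphan.c -- hypothetical example. A section that no output rule in
     * the linker script names becomes an "orphan", and the linker places
     * it using version-dependent heuristics (or drops it silently). */
    __attribute__((section(".mydata"))) int boot_flag = 1;

    /* With the flag added throughout this series, the placement is
     * reported at build time instead of being decided silently, e.g.:
     *
     *   $ ld --orphan-handling=warn -T kernel.lds orphan.o
     *   ld: warning: orphan section `.mydata' from `orphan.o' being
     *   placed in section `.mydata'
     */
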
@@ -72,6 +72,7 @@ SECTIONS
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
 }
@@ -122,6 +122,7 @@ SECTIONS
  _end = . ;
  STABS_DEBUG
+ ELF_DETAILS
  DISCARDS
  .arcextmap 0 : {
...
@@ -16,6 +16,10 @@ LDFLAGS_vmlinux += --be8
 KBUILD_LDFLAGS_MODULE += --be8
 endif
+# We never want expected sections to be placed heuristically by the
+# linker. All sections should be explicitly named in the linker script.
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
 ifeq ($(CONFIG_ARM_MODULE_PLTS),y)
 KBUILD_LDS_MODULE += $(srctree)/arch/arm/kernel/module.lds
 endif
...
@@ -123,6 +123,8 @@ endif
 LDFLAGS_vmlinux += --no-undefined
 # Delete all temporary local symbols
 LDFLAGS_vmlinux += -X
+# Report orphan sections
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
 # Next argument is a linker script
 LDFLAGS_vmlinux += -T
...
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2000 Russell King
  */
+#include <asm/vmlinux.lds.h>
 #ifdef CONFIG_CPU_ENDIAN_BE8
 #define ZIMAGE_MAGIC(x) ( (((x) >> 24) & 0x000000ff) | \
@@ -17,8 +18,11 @@ ENTRY(_start)
 SECTIONS
 {
  /DISCARD/ : {
+  COMMON_DISCARDS
   *(.ARM.exidx*)
   *(.ARM.extab*)
+  *(.note.*)
+  *(.rel.*)
   /*
    * Discard any r/w data - this produces a link error if we have any,
    * which is required for PIC decompression. Local data generates
@@ -36,9 +40,7 @@ SECTIONS
   *(.start)
   *(.text)
   *(.text.*)
-  *(.gnu.warning)
-  *(.glue_7t)
-  *(.glue_7)
+  ARM_STUBS_TEXT
  }
  .table : ALIGN(4) {
   _table_start = .;
@@ -128,12 +130,10 @@ SECTIONS
  PROVIDE(__pecoff_data_size = ALIGN(512) - ADDR(.data));
  PROVIDE(__pecoff_end = ALIGN(512));
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
+ STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+ ARM_ASSERTS
 }
 ASSERT(_edata_real == _edata, "error: zImage file size is incorrect");
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <asm-generic/vmlinux.lds.h>
 #ifdef CONFIG_HOTPLUG_CPU
 #define ARM_CPU_DISCARD(x)
@@ -49,8 +50,29 @@
  EXIT_CALL \
  ARM_MMU_DISCARD(*(.text.fixup)) \
  ARM_MMU_DISCARD(*(__ex_table)) \
- *(.discard) \
- *(.discard.*)
+ COMMON_DISCARDS
+
+/*
+ * Sections that should stay zero sized, which is safer to explicitly
+ * check instead of blindly discarding.
+ */
+#define ARM_ASSERTS \
+ .plt : { \
+  *(.iplt) *(.rel.iplt) *(.iplt) *(.igot.plt) \
+ } \
+ ASSERT(SIZEOF(.plt) == 0, \
+        "Unexpected run-time procedure linkages detected!")
+
+#define ARM_DETAILS \
+ ELF_DETAILS \
+ .ARM.attributes 0 : { *(.ARM.attributes) }
+
+#define ARM_STUBS_TEXT \
+ *(.gnu.warning) \
+ *(.glue_7) \
+ *(.glue_7t) \
+ *(.vfp11_veneer) \
+ *(.v4_bx)
 #define ARM_TEXT \
  IDMAP_TEXT \
@@ -64,9 +86,7 @@
  CPUIDLE_TEXT \
  LOCK_TEXT \
  KPROBES_TEXT \
- *(.gnu.warning) \
- *(.glue_7) \
- *(.glue_7t) \
+ ARM_STUBS_TEXT \
  . = ALIGN(4); \
  *(.got) /* Global offset table */ \
  ARM_CPU_KEEP(PROC_INFO)
...
@@ -9,15 +9,13 @@
 #include <linux/sizes.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/mpu.h>
 #include <asm/page.h>
-#include "vmlinux.lds.h"
 OUTPUT_ARCH(arm)
 ENTRY(stext)
@@ -152,6 +150,10 @@ SECTIONS
  _end = .;
  STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+ ARM_ASSERTS
 }
 /*
...
@@ -9,15 +9,13 @@
 #else
 #include <linux/pgtable.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/mpu.h>
 #include <asm/page.h>
-#include "vmlinux.lds.h"
 OUTPUT_ARCH(arm)
 ENTRY(stext)
@@ -151,6 +149,10 @@ SECTIONS
  _end = .;
  STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+ ARM_ASSERTS
 }
 #ifdef CONFIG_STRICT_KERNEL_RWX
...
@@ -28,6 +28,10 @@ LDFLAGS_vmlinux += --fix-cortex-a53-843419
 endif
 endif
+# We never want expected sections to be placed heuristically by the
+# linker. All sections should be explicitly named in the linker script.
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
 ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
 ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
 $(warning LSE atomics not supported by binutils)
@@ -46,13 +50,16 @@ endif
 KBUILD_CFLAGS += -mgeneral-regs-only \
     $(compat_vdso) $(cc_has_k_constraint)
-KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
 KBUILD_AFLAGS += $(compat_vdso)
 KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
 KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
+# Avoid generating .eh_frame* sections.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
 ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
 prepare: stack_protector_prepare
 stack_protector_prepare: prepare0
...
@@ -9,7 +9,6 @@
 #include <asm/assembler.h>
  .macro SMCCC instr
- .cfi_startproc
  \instr #0
  ldr x4, [sp]
  stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
@@ -21,7 +20,6 @@
  b.ne 1f
  str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
 1: ret
- .cfi_endproc
  .endm
 /*
...
@@ -6,6 +6,7 @@
  */
 #define RO_EXCEPTION_TABLE_ALIGN 8
+#define RUNTIME_DISCARD_EXIT
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
@@ -96,13 +97,10 @@ SECTIONS
  * matching the same input section name. There is no documented
  * order of matching.
  */
+ DISCARDS
  /DISCARD/ : {
-  EXIT_CALL
-  *(.discard)
-  *(.discard.*)
   *(.interp .dynamic)
   *(.dynsym .dynstr .hash .gnu.hash)
-  *(.eh_frame)
  }
  . = KIMAGE_VADDR;
@@ -131,6 +129,14 @@ SECTIONS
   *(.got) /* Global offset table */
  }
+ /*
+  * Make sure that the .got.plt is either completely empty or it
+  * contains only the lazy dispatch entries.
+  */
+ .got.plt : { *(.got.plt) }
+ ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
+        "Unexpected GOT/PLT entries detected!")
 . = ALIGN(SEGMENT_ALIGN);
 _etext = .; /* End of text section */
@@ -249,8 +255,22 @@ SECTIONS
  _end = .;
  STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
  HEAD_SYMBOLS
+
+ /*
+  * Sections that should stay zero sized, which is safer to
+  * explicitly check instead of blindly discarding.
+  */
+ .plt : {
+  *(.plt) *(.plt.*) *(.iplt) *(.igot)
+ }
+ ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+ .data.rel.ro : { *(.data.rel.ro) }
+ ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")
 }
 #include "image-vars.h"
...
@@ -43,7 +43,7 @@
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
-u64 __section(".mmuoff.data.write") vabits_actual;
+u64 __section(.mmuoff.data.write) vabits_actual;
 EXPORT_SYMBOL(vabits_actual);
 u64 kimage_voffset __ro_after_init;
...
@@ -109,6 +109,7 @@ SECTIONS
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
 }
@@ -67,5 +67,6 @@ SECTIONS
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
 }
@@ -218,6 +218,7 @@ SECTIONS {
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  /* Default discards */
  DISCARDS
...
@@ -202,6 +202,7 @@ SECTIONS
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  /* These must appear regardless of . */
  .gptab.sdata : {
...
@@ -64,6 +64,7 @@ SECTIONS
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
 }
@@ -58,6 +58,7 @@ SECTIONS
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
 }
@@ -103,6 +103,7 @@ SECTIONS
  /* Throw in the debugging sections */
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  /* Sections to be discarded -- must be last */
  DISCARDS
...
@@ -84,6 +84,7 @@ SECTIONS
  }
  STABS_DEBUG
+ ELF_DETAILS
  .note 0 : { *(.note) }
  /* Sections to be discarded */
...
@@ -164,6 +164,7 @@ SECTIONS
  _end = . ;
  STABS_DEBUG
+ ELF_DETAILS
  .note 0 : { *(.note) }
  /* Sections to be discarded */
...
@@ -360,8 +360,8 @@ SECTIONS
  PROVIDE32 (end = .);
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
  /DISCARD/ : {
...
@@ -98,6 +98,7 @@ SECTIONS
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
 }
@@ -181,6 +181,7 @@ SECTIONS
  /* Debugging sections. */
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  /* Sections to be discarded */
  DISCARDS
...
@@ -76,6 +76,7 @@ SECTIONS
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
 }
@@ -187,6 +187,7 @@ SECTIONS
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
 }
@@ -164,8 +164,8 @@ SECTIONS
  PROVIDE (end = .);
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
 }
@@ -108,8 +108,8 @@ SECTIONS
  PROVIDE (end = .);
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
 }
@@ -209,6 +209,10 @@ ifdef CONFIG_X86_64
 LDFLAGS_vmlinux += -z max-page-size=0x200000
 endif
+# We never want expected sections to be placed heuristically by the
+# linker. All sections should be explicitly named in the linker script.
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
 archscripts: scripts_basic
  $(Q)$(MAKE) $(build)=arch/x86/tools relocs
...
@@ -29,7 +29,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
  vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst
 KBUILD_CFLAGS := -m$(BITS) -O2
-KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
+KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small
@@ -45,24 +45,19 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS += -D__DISABLE_EXPORTS
 # Disable relocation relaxation in case the link is not PIE.
 KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
+KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
 UBSAN_SANITIZE :=n
 KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
+KBUILD_LDFLAGS += $(call ld-option,--no-ld-generated-unwind-info)
 # Compressed kernel should be built as PIE since it may be loaded at any
 # address by the bootloader.
-ifeq ($(CONFIG_X86_32),y)
-KBUILD_LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
-else
-# To build 64-bit compressed kernel as PIE, we disable relocation
-# overflow check to avoid relocation overflow error with a new linker
-# command-line option, -z noreloc-overflow.
-KBUILD_LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
- && echo "-z noreloc-overflow -pie --no-dynamic-linker")
-endif
-LDFLAGS_vmlinux := -T
+LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker)
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+LDFLAGS_vmlinux += -T
 hostprogs := mkpiggy
 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
@@ -96,30 +91,8 @@ vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
 vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a
-# The compressed kernel is built with -fPIC/-fPIE so that a boot loader
-# can place it anywhere in memory and it will still run. However, since
-# it is executed as-is without any ELF relocation processing performed
-# (and has already had all relocation sections stripped from the binary),
-# none of the code can use data relocations (e.g. static assignments of
-# pointer values), since they will be meaningless at runtime. This check
-# will refuse to link the vmlinux if any of these relocations are found.
-quiet_cmd_check_data_rel = DATAREL $@
-define cmd_check_data_rel
- for obj in $(filter %.o,$^); do \
-  $(READELF) -S $$obj | grep -qF .rel.local && { \
-   echo "error: $$obj has data relocations!" >&2; \
-   exit 1; \
-  } || true; \
- done
-endef
-# We need to run two commands under "if_changed", so merge them into a
-# single invocation.
-quiet_cmd_check-and-link-vmlinux = LD $@
-      cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
 $(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE
- $(call if_changed,check-and-link-vmlinux)
+ $(call if_changed,ld)
 OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
...
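
For illustration only (not part of the commit): the DATAREL check removed above guarded against data relocations, which the new true-PIE link plus the .rel.dyn/.rela.dyn asserts in the linker script now catch more directly. A hedged sketch of what such a relocation looks like in C (file name hypothetical):

    /* datarel.c -- hypothetical example. Built with -fPIE, a statically
     * initialized pointer needs a run-time relocation (e.g.
     * R_386_RELATIVE) so that a loader can fix up the stored address.
     * The compressed kernel runs with no relocation processing at all,
     * so the stored link-time address would be meaningless at run time. */
    int x;
    int *p = &x;    /* emits a data relocation when linked as PIE */
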
@@ -33,32 +33,13 @@
 #include <asm/bootparam.h>
 /*
- * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
- * relocation to get the symbol address in PIC. When the compressed x86
- * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
- * relocations to their fixed symbol addresses. However, when the
- * compressed x86 kernel is loaded at a different address, it leads
- * to the following load failure:
- *
- *   Failed to allocate space for phdrs
- *
- * during the decompression stage.
- *
- * If the compressed x86 kernel is relocatable at run-time, it should be
- * compiled with -fPIE, instead of -fPIC, if possible and should be built as
- * Position Independent Executable (PIE) so that linker won't optimize
- * R_386_GOT32X relocation to its fixed symbol address. Older
- * linkers generate R_386_32 relocations against locally defined symbols,
- * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less
- * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
- * R_386_32 relocations when relocating the kernel. To generate
- * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as
- * hidden:
+ * These symbols needed to be marked as .hidden to prevent the BFD linker from
+ * generating R_386_32 (rather than R_386_RELATIVE) relocations for them when
+ * the 32-bit compressed kernel is linked as PIE. This is no longer necessary,
+ * but it doesn't hurt to keep them .hidden.
  */
  .hidden _bss
  .hidden _ebss
- .hidden _got
- .hidden _egot
  .hidden _end
  __HEAD
@@ -77,10 +58,10 @@ SYM_FUNC_START(startup_32)
  leal (BP_scratch+4)(%esi), %esp
  call 1f
 1: popl %edx
- subl $1b, %edx
+ addl $_GLOBAL_OFFSET_TABLE_+(.-1b), %edx
  /* Load new GDT */
- leal gdt(%edx), %eax
+ leal gdt@GOTOFF(%edx), %eax
  movl %eax, 2(%eax)
  lgdt (%eax)
@@ -93,14 +74,16 @@ SYM_FUNC_START(startup_32)
  movl %eax, %ss
 /*
- * %edx contains the address we are loaded at by the boot loader and %ebx
- * contains the address where we should move the kernel image temporarily
- * for safe in-place decompression. %ebp contains the address that the kernel
- * will be decompressed to.
+ * %edx contains the address we are loaded at by the boot loader (plus the
+ * offset to the GOT). The below code calculates %ebx to be the address where
+ * we should move the kernel image temporarily for safe in-place decompression
+ * (again, plus the offset to the GOT).
+ *
+ * %ebp is calculated to be the address that the kernel will be decompressed to.
  */
 #ifdef CONFIG_RELOCATABLE
- movl %edx, %ebx
+ leal startup_32@GOTOFF(%edx), %ebx
 #ifdef CONFIG_EFI_STUB
 /*
@@ -111,7 +94,7 @@ SYM_FUNC_START(startup_32)
  * image_offset = startup_32 - image_base
  * Otherwise image_offset will be zero and has no effect on the calculations.
  */
- subl image_offset(%edx), %ebx
+ subl image_offset@GOTOFF(%edx), %ebx
 #endif
  movl BP_kernel_alignment(%esi), %eax
@@ -128,10 +111,10 @@ SYM_FUNC_START(startup_32)
  movl %ebx, %ebp // Save the output address for later
  /* Target address to relocate to for decompression */
  addl BP_init_size(%esi), %ebx
- subl $_end, %ebx
+ subl $_end@GOTOFF, %ebx
  /* Set up the stack */
- leal boot_stack_end(%ebx), %esp
+ leal boot_stack_end@GOTOFF(%ebx), %esp
  /* Zero EFLAGS */
  pushl $0
@@ -142,8 +125,8 @@ SYM_FUNC_START(startup_32)
  * where decompression in place becomes safe.
  */
  pushl %esi
- leal (_bss-4)(%edx), %esi
- leal (_bss-4)(%ebx), %edi
+ leal (_bss@GOTOFF-4)(%edx), %esi
+ leal (_bss@GOTOFF-4)(%ebx), %edi
  movl $(_bss - startup_32), %ecx
  shrl $2, %ecx
  std
@@ -156,14 +139,14 @@ SYM_FUNC_START(startup_32)
  * during extract_kernel below. To avoid any issues, repoint the GDTR
  * to the new copy of the GDT.
  */
- leal gdt(%ebx), %eax
+ leal gdt@GOTOFF(%ebx), %eax
  movl %eax, 2(%eax)
  lgdt (%eax)
 /*
  * Jump to the relocated address.
  */
- leal .Lrelocated(%ebx), %eax
+ leal .Lrelocated@GOTOFF(%ebx), %eax
  jmp *%eax
 SYM_FUNC_END(startup_32)
@@ -173,7 +156,7 @@ SYM_FUNC_START_ALIAS(efi_stub_entry)
  add $0x4, %esp
  movl 8(%esp), %esi /* save boot_params pointer */
  call efi_main
- leal startup_32(%eax), %eax
+ /* efi_main returns the possibly relocated address of startup_32 */
  jmp *%eax
 SYM_FUNC_END(efi32_stub_entry)
 SYM_FUNC_END_ALIAS(efi_stub_entry)
@@ -186,40 +169,26 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
  * Clear BSS (stack is currently empty)
  */
  xorl %eax, %eax
- leal _bss(%ebx), %edi
- leal _ebss(%ebx), %ecx
+ leal _bss@GOTOFF(%ebx), %edi
+ leal _ebss@GOTOFF(%ebx), %ecx
  subl %edi, %ecx
  shrl $2, %ecx
  rep stosl
-/*
- * Adjust our own GOT
- */
- leal _got(%ebx), %edx
- leal _egot(%ebx), %ecx
-1:
- cmpl %ecx, %edx
- jae 2f
- addl %ebx, (%edx)
- addl $4, %edx
- jmp 1b
-2:
 /*
  * Do the extraction, and jump to the new kernel..
  */
  /* push arguments for extract_kernel: */
- pushl $z_output_len /* decompressed length, end of relocs */
+ pushl output_len@GOTOFF(%ebx) /* decompressed length, end of relocs */
  pushl %ebp /* output address */
- pushl $z_input_len /* input_len */
- leal input_data(%ebx), %eax
+ pushl input_len@GOTOFF(%ebx) /* input_len */
+ leal input_data@GOTOFF(%ebx), %eax
  pushl %eax /* input_data */
- leal boot_heap(%ebx), %eax
+ leal boot_heap@GOTOFF(%ebx), %eax
  pushl %eax /* heap area */
  pushl %esi /* real mode pointer */
  call extract_kernel /* returns kernel location in %eax */
  addl $24, %esp
 /*
...
@@ -40,11 +40,35 @@
  */
  .hidden _bss
  .hidden _ebss
- .hidden _got
- .hidden _egot
  .hidden _end
  __HEAD
+
+/*
+ * This macro gives the relative virtual address of X, i.e. the offset of X
+ * from startup_32. This is the same as the link-time virtual address of X,
+ * since startup_32 is at 0, but defining it this way tells the
+ * assembler/linker that we do not want the actual run-time address of X. This
+ * prevents the linker from trying to create unwanted run-time relocation
+ * entries for the reference when the compressed kernel is linked as PIE.
+ *
+ * A reference X(%reg) will result in the link-time VA of X being stored with
+ * the instruction, and a run-time R_X86_64_RELATIVE relocation entry that
+ * adds the 64-bit base address where the kernel is loaded.
+ *
+ * Replacing it with (X-startup_32)(%reg) results in the offset being stored,
+ * and no run-time relocation.
+ *
+ * The macro should be used as a displacement with a base register containing
+ * the run-time address of startup_32 [i.e. rva(X)(%reg)], or as an immediate
+ * [$ rva(X)].
+ *
+ * This macro can only be used from within the .head.text section, since the
+ * expression requires startup_32 to be in the same section as the code being
+ * assembled.
+ */
+#define rva(X) ((X) - startup_32)
+
  .code32
 SYM_FUNC_START(startup_32)
 /*
@@ -67,10 +91,10 @@ SYM_FUNC_START(startup_32)
  leal (BP_scratch+4)(%esi), %esp
  call 1f
 1: popl %ebp
- subl $1b, %ebp
+ subl $ rva(1b), %ebp
  /* Load new GDT with the 64bit segments using 32bit descriptor */
- leal gdt(%ebp), %eax
+ leal rva(gdt)(%ebp), %eax
  movl %eax, 2(%eax)
  lgdt (%eax)
@@ -83,7 +107,7 @@ SYM_FUNC_START(startup_32)
  movl %eax, %ss
 /* setup a stack and make sure cpu supports long mode. */
- leal boot_stack_end(%ebp), %esp
+ leal rva(boot_stack_end)(%ebp), %esp
  call verify_cpu
  testl %eax, %eax
@@ -110,7 +134,7 @@ SYM_FUNC_START(startup_32)
  * image_offset = startup_32 - image_base
  * Otherwise image_offset will be zero and has no effect on the calculations.
  */
- subl image_offset(%ebp), %ebx
+ subl rva(image_offset)(%ebp), %ebx
 #endif
  movl BP_kernel_alignment(%esi), %eax
@@ -126,7 +150,7 @@ SYM_FUNC_START(startup_32)
  /* Target address to relocate to for decompression */
  addl BP_init_size(%esi), %ebx
- subl $_end, %ebx
+ subl $ rva(_end), %ebx
 /*
  * Prepare for entering 64 bit mode
@@ -154,19 +178,19 @@ SYM_FUNC_START(startup_32)
 1:
  /* Initialize Page tables to 0 */
- leal pgtable(%ebx), %edi
+ leal rva(pgtable)(%ebx), %edi
  xorl %eax, %eax
  movl $(BOOT_INIT_PGT_SIZE/4), %ecx
  rep stosl
  /* Build Level 4 */
- leal pgtable + 0(%ebx), %edi
+ leal rva(pgtable + 0)(%ebx), %edi
  leal 0x1007 (%edi), %eax
  movl %eax, 0(%edi)
  addl %edx, 4(%edi)
  /* Build Level 3 */
- leal pgtable + 0x1000(%ebx), %edi
+ leal rva(pgtable + 0x1000)(%ebx), %edi
  leal 0x1007(%edi), %eax
  movl $4, %ecx
 1: movl %eax, 0x00(%edi)
@@ -177,7 +201,7 @@ SYM_FUNC_START(startup_32)
  jnz 1b
  /* Build Level 2 */
- leal pgtable + 0x2000(%ebx), %edi
+ leal rva(pgtable + 0x2000)(%ebx), %edi
  movl $0x00000183, %eax
  movl $2048, %ecx
 1: movl %eax, 0(%edi)
@@ -188,7 +212,7 @@ SYM_FUNC_START(startup_32)
  jnz 1b
  /* Enable the boot page tables */
- leal pgtable(%ebx), %eax
+ leal rva(pgtable)(%ebx), %eax
  movl %eax, %cr3
  /* Enable Long mode in EFER (Extended Feature Enable Register) */
@@ -213,14 +237,14 @@ SYM_FUNC_START(startup_32)
  * We place all of the values on our mini stack so lret can
  * used to perform that far jump.
  */
- leal startup_64(%ebp), %eax
+ leal rva(startup_64)(%ebp), %eax
 #ifdef CONFIG_EFI_MIXED
- movl efi32_boot_args(%ebp), %edi
+ movl rva(efi32_boot_args)(%ebp), %edi
  cmp $0, %edi
  jz 1f
- leal efi64_stub_entry(%ebp), %eax
- movl efi32_boot_args+4(%ebp), %esi
- movl efi32_boot_args+8(%ebp), %edx // saved bootparams pointer
+ leal rva(efi64_stub_entry)(%ebp), %eax
+ movl rva(efi32_boot_args+4)(%ebp), %esi
+ movl rva(efi32_boot_args+8)(%ebp), %edx // saved bootparams pointer
  cmpl $0, %edx
  jnz 1f
 /*
@@ -231,7 +255,7 @@ SYM_FUNC_START(startup_32)
  * the correct stack alignment for entry.
  */
  subl $40, %esp
- leal efi_pe_entry(%ebp), %eax
+ leal rva(efi_pe_entry)(%ebp), %eax
  movl %edi, %ecx // MS calling convention
  movl %esi, %edx
 1:
@@ -257,18 +281,18 @@ SYM_FUNC_START(efi32_stub_entry)
  call 1f
 1: pop %ebp
- subl $1b, %ebp
+ subl $ rva(1b), %ebp
- movl %esi, efi32_boot_args+8(%ebp)
+ movl %esi, rva(efi32_boot_args+8)(%ebp)
 SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
- movl %ecx, efi32_boot_args(%ebp)
- movl %edx, efi32_boot_args+4(%ebp)
- movb $0, efi_is64(%ebp)
+ movl %ecx, rva(efi32_boot_args)(%ebp)
+ movl %edx, rva(efi32_boot_args+4)(%ebp)
+ movb $0, rva(efi_is64)(%ebp)
  /* Save firmware GDTR and code/data selectors */
- sgdtl efi32_boot_gdt(%ebp)
- movw %cs, efi32_boot_cs(%ebp)
- movw %ds, efi32_boot_ds(%ebp)
+ sgdtl rva(efi32_boot_gdt)(%ebp)
+ movw %cs, rva(efi32_boot_cs)(%ebp)
+ movw %ds, rva(efi32_boot_ds)(%ebp)
  /* Disable paging */
  movl %cr0, %eax
@@ -347,30 +371,11 @@ SYM_CODE_START(startup_64)
  /* Target address to relocate to for decompression */
  movl BP_init_size(%rsi), %ebx
- subl $_end, %ebx
+ subl $ rva(_end), %ebx
  addq %rbp, %rbx
  /* Set up the stack */
- leaq boot_stack_end(%rbx), %rsp
+ leaq rva(boot_stack_end)(%rbx), %rsp
- /*
-  * paging_prepare() and cleanup_trampoline() below can have GOT
-  * references. Adjust the table with address we are running at.
-  *
-  * Zero RAX for adjust_got: the GOT was not adjusted before;
-  * there's no adjustment to undo.
-  */
- xorq %rax, %rax
- /*
-  * Calculate the address the binary is loaded at and use it as
-  * a GOT adjustment.
-  */
- call 1f
-1: popq %rdi
- subq $1b, %rdi
- call .Ladjust_got
 /*
  * At this point we are in long mode with 4-level paging enabled,
@@ -444,7 +449,7 @@ SYM_CODE_START(startup_64)
  lretq
 trampoline_return:
  /* Restore the stack, the 32-bit trampoline uses its own stack */
- leaq boot_stack_end(%rbx), %rsp
+ leaq rva(boot_stack_end)(%rbx), %rsp
 /*
  * cleanup_trampoline() would restore trampoline memory.
@@ -456,7 +461,7 @@ trampoline_return:
  * this function call.
  */
  pushq %rsi
- leaq top_pgtable(%rbx), %rdi
+ leaq rva(top_pgtable)(%rbx), %rdi
  call cleanup_trampoline
  popq %rsi
@@ -464,30 +469,15 @@ trampoline_return:
  pushq $0
  popfq
- /*
-  * Previously we've adjusted the GOT with address the binary was
-  * loaded at. Now we need to re-adjust for relocation address.
-  *
-  * Calculate the address the binary is loaded at, so that we can
-  * undo the previous GOT adjustment.
-  */
- call 1f
-1: popq %rax
- subq $1b, %rax
- /* The new adjustment is the relocation address */
- movq %rbx, %rdi
- call .Ladjust_got
 /*
  * Copy the compressed kernel to the end of our buffer
  * where decompression in place becomes safe.
  */
  pushq %rsi
  leaq (_bss-8)(%rip), %rsi
- leaq (_bss-8)(%rbx), %rdi
- movq $_bss /* - $startup_32 */, %rcx
- shrq $3, %rcx
+ leaq rva(_bss-8)(%rbx), %rdi
+ movl $(_bss - startup_32), %ecx
+ shrl $3, %ecx
  std
  rep movsq
  cld
@@ -498,15 +488,15 @@ trampoline_return:
  * during extract_kernel below. To avoid any issues, repoint the GDTR
  * to the new copy of the GDT.
  */
- leaq gdt64(%rbx), %rax
- leaq gdt(%rbx), %rdx
+ leaq rva(gdt64)(%rbx), %rax
+ leaq rva(gdt)(%rbx), %rdx
  movq %rdx, 2(%rax)
  lgdt (%rax)
 /*
  * Jump to the relocated address.
  */
- leaq .Lrelocated(%rbx), %rax
+ leaq rva(.Lrelocated)(%rbx), %rax
  jmp *%rax
 SYM_CODE_END(startup_64)
@@ -518,7 +508,7 @@ SYM_FUNC_START_ALIAS(efi_stub_entry)
  movq %rdx, %rbx /* save boot_params pointer */
  call efi_main
  movq %rbx,%rsi
- leaq startup_64(%rax), %rax
+ leaq rva(startup_64)(%rax), %rax
  jmp *%rax
 SYM_FUNC_END(efi64_stub_entry)
 SYM_FUNC_END_ALIAS(efi_stub_entry)
@@ -544,9 +534,9 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
  movq %rsi, %rdi /* real mode address */
  leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
  leaq input_data(%rip), %rdx /* input_data */
- movl $z_input_len, %ecx /* input_len */
+ movl input_len(%rip), %ecx /* input_len */
  movq %rbp, %r8 /* output target address */
- movl $z_output_len, %r9d /* decompressed length, end of relocs */
+ movl output_len(%rip), %r9d /* decompressed length, end of relocs */
  call extract_kernel /* returns kernel location in %rax */
  popq %rsi
@@ -556,27 +546,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
  jmp *%rax
 SYM_FUNC_END(.Lrelocated)
-/*
- * Adjust the global offset table
- *
- * RAX is the previous adjustment of the table to undo (use 0 if it's the
- * first time we touch GOT).
- * RDI is the new adjustment to apply.
- */
-.Ladjust_got:
- /* Walk through the GOT adding the address to the entries */
- leaq _got(%rip), %rdx
- leaq _egot(%rip), %rcx
-1:
- cmpq %rcx, %rdx
- jae 2f
- subq %rax, (%rdx) /* Undo previous adjustment */
- addq %rdi, (%rdx) /* Apply the new adjustment */
- addq $8, %rdx
- jmp 1b
-2:
- ret
  .code32
 /*
  * This is the 32-bit trampoline that will be copied over to low memory.
@@ -702,7 +671,7 @@ SYM_DATA(efi_is64, .byte 1)
 #define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol)
 #define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base)
- .text
+ __HEAD
  .code32
 SYM_FUNC_START(efi32_pe_entry)
 /*
@@ -724,12 +693,12 @@ SYM_FUNC_START(efi32_pe_entry)
  call 1f
 1: pop %ebx
- subl $1b, %ebx
+ subl $ rva(1b), %ebx
  /* Get the loaded image protocol pointer from the image handle */
  leal -4(%ebp), %eax
  pushl %eax // &loaded_image
- leal loaded_image_proto(%ebx), %eax
+ leal rva(loaded_image_proto)(%ebx), %eax
  pushl %eax // pass the GUID address
  pushl 8(%ebp) // pass the image handle
@@ -764,7 +733,7 @@ SYM_FUNC_START(efi32_pe_entry)
  * use it before we get to the 64-bit efi_pe_entry() in C code.
  */
  subl %esi, %ebx
- movl %ebx, image_offset(%ebp) // save image_offset
+ movl %ebx, rva(image_offset)(%ebp) // save image_offset
  jmp efi32_pe_stub_entry
 2: popl %edi // restore callee-save registers
...
@@ -60,6 +60,12 @@ int main(int argc, char *argv[])
  printf(".incbin \"%s\"\n", argv[1]);
  printf("input_data_end:\n");
+ printf(".section \".rodata\",\"a\",@progbits\n");
+ printf(".globl input_len\n");
+ printf("input_len:\n\t.long %lu\n", ilen);
+ printf(".globl output_len\n");
+ printf("output_len:\n\t.long %lu\n", (unsigned long)olen);
  retval = 0;
 bail:
  if (f)
...
@@ -42,12 +42,6 @@ SECTIONS
   *(.rodata.*)
   _erodata = . ;
  }
- .got : {
-  _got = .;
-  KEEP(*(.got.plt))
-  KEEP(*(.got))
-  _egot = .;
- }
  .data : {
   _data = . ;
   *(.data)
@@ -75,5 +69,49 @@ SECTIONS
  . = ALIGN(PAGE_SIZE); /* keep ZO size page aligned */
  _end = .;
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
+ /DISCARD/ : {
+  *(.dynamic) *(.dynsym) *(.dynstr) *(.dynbss)
+  *(.hash) *(.gnu.hash)
+  *(.note.*)
+ }
+
+ .got.plt (INFO) : {
+  *(.got.plt)
+ }
+ ASSERT(SIZEOF(.got.plt) == 0 ||
+#ifdef CONFIG_X86_64
+        SIZEOF(.got.plt) == 0x18,
+#else
+        SIZEOF(.got.plt) == 0xc,
+#endif
+        "Unexpected GOT/PLT entries detected!")
+
+ /*
+  * Sections that should stay zero sized, which is safer to
+  * explicitly check instead of blindly discarding.
+  */
+ .got : {
+  *(.got)
+ }
+ ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")
+
+ .plt : {
+  *(.plt) *(.plt.*)
+ }
+ ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+ .rel.dyn : {
+  *(.rel.*) *(.rel_*)
+ }
+ ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")
+
+ .rela.dyn : {
+  *(.rela.*) *(.rela_*)
+ }
+ ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
 }
@@ -20,7 +20,7 @@ SECTIONS
  .initdata : { *(.initdata) }
  __end_init = .;
- .text : { *(.text) }
+ .text : { *(.text .text.*) }
  .text32 : { *(.text32) }
  . = ALIGN(16);
...
@@ -141,11 +141,15 @@
 # define _ASM_EXTABLE_FAULT(from, to) \
  _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
-# define _ASM_NOKPROBE(entry) \
+# ifdef CONFIG_KPROBES
+# define _ASM_NOKPROBE(entry) \
  .pushsection "_kprobe_blacklist","aw" ; \
  _ASM_ALIGN ; \
  _ASM_PTR (entry); \
  .popsection
+# else
+# define _ASM_NOKPROBE(entry)
+# endif
 #else /* ! __ASSEMBLY__ */
 # define _EXPAND_EXTABLE_HANDLE(x) #x
...
@@ -411,10 +411,47 @@ SECTIONS
  STABS_DEBUG
  DWARF_DEBUG
+ ELF_DETAILS
  DISCARDS
-}
+
+ /*
+  * Make sure that the .got.plt is either completely empty or it
+  * contains only the lazy dispatch entries.
+  */
+ .got.plt (INFO) : { *(.got.plt) }
+ ASSERT(SIZEOF(.got.plt) == 0 ||
+#ifdef CONFIG_X86_64
+        SIZEOF(.got.plt) == 0x18,
+#else
+        SIZEOF(.got.plt) == 0xc,
+#endif
+        "Unexpected GOT/PLT entries detected!")
+
+ /*
+  * Sections that should stay zero sized, which is safer to
+  * explicitly check instead of blindly discarding.
+  */
+ .got : {
+  *(.got) *(.igot.*)
+ }
+ ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")
+
+ .plt : {
+  *(.plt) *(.plt.*) *(.iplt)
+ }
+ ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+ .rel.dyn : {
+  *(.rel.*) *(.rel_*)
+ }
+ ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")
+
+ .rela.dyn : {
+  *(.rela.*) *(.rela_*)
+ }
+ ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
+}
 #ifdef CONFIG_X86_32
 /*
...
@@ -18,7 +18,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
 # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
 # disable the stackleak plugin
 cflags-$(CONFIG_ARM64) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-    -fpie $(DISABLE_STACKLEAK_PLUGIN)
+    -fpie $(DISABLE_STACKLEAK_PLUGIN) \
+    $(call cc-option,-mbranch-protection=none)
 cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
     -fno-builtin -fpic \
     $(call cc-option,-mno-single-pic-base)
@@ -26,7 +27,7 @@ cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
 cflags-$(CONFIG_EFI_GENERIC_STUB) += -I$(srctree)/scripts/dtc/libfdt
 KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
-    -include $(srctree)/drivers/firmware/efi/libstub/hidden.h \
+    -include $(srctree)/include/linux/hidden.h \
     -D__NO_FORTIFY \
     -ffreestanding \
     -fno-stack-protector \
@@ -65,6 +66,12 @@ lib-$(CONFIG_ARM64) += arm64-stub.o
 lib-$(CONFIG_X86) += x86-stub.o
 CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+# Even when -mbranch-protection=none is set, Clang will generate a
+# .note.gnu.property for code-less object files (like lib/ctype.c),
+# so work around this by explicitly removing the unwanted section.
+# https://bugs.llvm.org/show_bug.cgi?id=46480
+STUBCOPY_FLAGS-y += --remove-section=.note.gnu.property
 #
 # For x86, bootloaders like systemd-boot or grub-efi do not zero-initialize the
 # .bss section, so the .bss section of the EFI stub needs to be included in the
...
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * To prevent the compiler from emitting GOT-indirected (and thus absolute)
- * references to any global symbols, override their visibility as 'hidden'
- */
-#pragma GCC visibility push(hidden)
@@ -34,6 +34,7 @@
  *
  * STABS_DEBUG
  * DWARF_DEBUG
+ * ELF_DETAILS
  *
  * DISCARDS  // must be the last
  * }
@@ -581,7 +582,10 @@
  */
 #define TEXT_TEXT \
  ALIGN_FUNCTION(); \
- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.text.hot .text.hot.*) \
+ *(TEXT_MAIN .text.fixup) \
+ *(.text.unlikely .text.unlikely.*) \
+ *(.text.unknown .text.unknown.*) \
  NOINSTR_TEXT \
  *(.text..refcount) \
  *(.ref.text) \
@@ -812,15 +816,21 @@
  .debug_macro 0 : { *(.debug_macro) } \
  .debug_addr 0 : { *(.debug_addr) }
 /* Stabs debugging sections. */
 #define STABS_DEBUG \
  .stab 0 : { *(.stab) } \
  .stabstr 0 : { *(.stabstr) } \
  .stab.excl 0 : { *(.stab.excl) } \
  .stab.exclstr 0 : { *(.stab.exclstr) } \
  .stab.index 0 : { *(.stab.index) } \
- .stab.indexstr 0 : { *(.stab.indexstr) } \
- .comment 0 : { *(.comment) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+
+/* Required sections not related to debugging. */
+#define ELF_DETAILS \
+ .comment 0 : { *(.comment) } \
+ .symtab 0 : { *(.symtab) } \
+ .strtab 0 : { *(.strtab) } \
+ .shstrtab 0 : { *(.shstrtab) }
 #ifdef CONFIG_GENERIC_BUG
 #define BUG_TABLE \
@@ -955,13 +965,38 @@
  EXIT_DATA
 #endif
+/*
+ * Clang's -fsanitize=kernel-address and -fsanitize=thread produce
+ * unwanted sections (.eh_frame and .init_array.*), but
+ * CONFIG_CONSTRUCTORS wants to keep any .init_array.* sections.
+ * https://bugs.llvm.org/show_bug.cgi?id=46478
+ */
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
+# ifdef CONFIG_CONSTRUCTORS
+# define SANITIZER_DISCARDS \
+ *(.eh_frame)
+# else
+# define SANITIZER_DISCARDS \
+ *(.init_array) *(.init_array.*) \
+ *(.eh_frame)
+# endif
+#else
+# define SANITIZER_DISCARDS
+#endif
+
+#define COMMON_DISCARDS \
+ SANITIZER_DISCARDS \
+ *(.discard) \
+ *(.discard.*) \
+ *(.modinfo) \
+ /* ld.bfd warns about .gnu.version* even when not emitted */ \
+ *(.gnu.version*) \
+
 #define DISCARDS \
 /DISCARD/ : { \
  EXIT_DISCARDS \
  EXIT_CALL \
- *(.discard) \
- *(.discard.*) \
- *(.modinfo) \
+ COMMON_DISCARDS \
 }
 /**
...
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * When building position independent code with GCC using the -fPIC option,
+ * (or even the -fPIE one on older versions), it will assume that we are
+ * building a dynamic object (either a shared library or an executable) that
+ * may have symbol references that can only be resolved at load time. For a
+ * variety of reasons (ELF symbol preemption, the CoW footprint of the section
+ * that is modified by the loader), this results in all references to symbols
+ * with external linkage to go via entries in the Global Offset Table (GOT),
+ * which carries absolute addresses which need to be fixed up when the
+ * executable image is loaded at an offset which is different from its link
+ * time offset.
+ *
+ * Fortunately, there is a way to inform the compiler that such symbol
+ * references will be satisfied at link time rather than at load time, by
+ * giving them 'hidden' visibility.
+ */
+#pragma GCC visibility push(hidden)
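
For illustration only (not part of the commit): a hedged sketch of the effect the pragma has on code generation (file and symbol names hypothetical):

    /* visibility.c -- hypothetical example. Without hidden visibility,
     * -fPIC/-fPIE code loads the address of 'counter' from a GOT slot,
     * an absolute address a loader must fix up. With the pragma in
     * effect, the compiler may emit a direct PC-relative reference
     * instead, so the decompressor and EFI stub need no GOT fixups. */
    #pragma GCC visibility push(hidden)
    extern int counter;
    int read_counter(void) { return counter; }
    #pragma GCC visibility pop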