Commit 4eb77fa1 authored by Linus Torvalds

Merge tag 'x86_boot_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 boot updates from Borislav Petkov:
 "A  of early boot cleanups and fixes.

   - Do some spring cleaning to the compressed boot code by moving the
     EFI mixed-mode code to a separate compilation unit, the AMD memory
     encryption early code where it belongs and fixing up build
     dependencies. Make the deprecated EFI handover protocol optional
     with the goal of removing it at some point (Ard Biesheuvel)

   - Skip realmode init code on Xen PV guests as it is not needed there

   - Remove an old 32-bit PIC code compiler workaround"

* tag 'x86_boot_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/boot: Remove x86_32 PIC using %ebx workaround
  x86/boot: Skip realmode init code when running as Xen PV guest
  x86/efi: Make the deprecated EFI handover protocol optional
  x86/boot/compressed: Only build mem_encrypt.S if AMD_MEM_ENCRYPT=y
  x86/boot/compressed: Adhere to calling convention in get_sev_encryption_bit()
  x86/boot/compressed: Move startup32_check_sev_cbit() out of head_64.S
  x86/boot/compressed: Move startup32_check_sev_cbit() into .text
  x86/boot/compressed: Move startup32_load_idt() out of head_64.S
  x86/boot/compressed: Move startup32_load_idt() into .text section
  x86/boot/compressed: Pull global variable reference into startup32_load_idt()
  x86/boot/compressed: Avoid touching ECX in startup32_set_idt_entry()
  x86/boot/compressed: Simplify IDT/GDT preserve/restore in the EFI thunk
  x86/boot/compressed, efi: Merge multiple definitions of image_offset into one
  x86/boot/compressed: Move efi32_pe_entry() out of head_64.S
  x86/boot/compressed: Move efi32_entry out of head_64.S
  x86/boot/compressed: Move efi32_pe_entry into .text section
  x86/boot/compressed: Move bootargs parsing out of 32-bit startup code
  x86/boot/compressed: Move 32-bit entrypoint code into .text section
  x86/boot/compressed: Rename efi_thunk_64.S to efi-mixed.S
parents 8b9ed79c 60253f10
......@@ -1981,6 +1981,23 @@ config EFI_STUB
See Documentation/admin-guide/efi-stub.rst for more information.
config EFI_HANDOVER_PROTOCOL
bool "EFI handover protocol (DEPRECATED)"
depends on EFI_STUB
default y
help
Select this in order to include support for the deprecated EFI
handover protocol, which defines alternative entry points into the
EFI stub. This is a practice that has no basis in the UEFI
specification, and requires a priori knowledge on the part of the
bootloader about Linux/x86 specific ways of passing the command line
and initrd, and where in memory those assets may be loaded.
If in doubt, say Y. Even though the corresponding support is not
present in upstream GRUB or other bootloaders, most distros build
GRUB with numerous downstream patches applied, and may rely on the
handover protocol as a result.
config EFI_MIXED
bool "EFI mixed-mode support"
depends on EFI_STUB && X86_64
......
......@@ -100,7 +100,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
ifdef CONFIG_X86_64
vmlinux-objs-y += $(obj)/ident_map_64.o
vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
vmlinux-objs-y += $(obj)/mem_encrypt.o
vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
vmlinux-objs-y += $(obj)/pgtable_64.o
vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
endif
......@@ -108,11 +108,11 @@ endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
vmlinux-objs-$(CONFIG_INTEL_TDX_GUEST) += $(obj)/tdx.o $(obj)/tdcall.o
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
$(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
$(call if_changed,ld)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
......
......@@ -22,6 +22,49 @@
.code64
.text
/*
* When booting in 64-bit mode on 32-bit EFI firmware, startup_64_mixed_mode()
* is the first thing that runs after switching to long mode. Depending on
* whether the EFI handover protocol or the compat entry point was used to
* enter the kernel, it will either branch to the 64-bit EFI handover
* entrypoint at offset 0x390 in the image, or to the 64-bit EFI PE/COFF
* entrypoint efi_pe_entry(). In the former case, the bootloader must provide a
* struct bootparams pointer as the third argument, so the presence of such a
* pointer is used to disambiguate.
*
 *                                                            +--------------+
 * +------------------+     +------------+            +------>| efi_pe_entry |
 * | efi32_pe_entry   |---->|            |            |       +-----------+--+
 * +------------------+     |            |     +------+----------------+  |
 *                          | startup_32 |---->| startup_64_mixed_mode |  |
 * +------------------+     |            |     +------+----------------+  V
 * | efi32_stub_entry |---->|            |            |     +------------------+
 * +------------------+     +------------+            +---->| efi64_stub_entry |
 *                                                          +-------------+----+
 *                          +------------+     +----------+               |
 *                          | startup_64 |<----| efi_main |<--------------+
 *                          +------------+     +----------+
*/
SYM_FUNC_START(startup_64_mixed_mode)
lea efi32_boot_args(%rip), %rdx
mov 0(%rdx), %edi
mov 4(%rdx), %esi
mov 8(%rdx), %edx // saved bootparams pointer
test %edx, %edx
jnz efi64_stub_entry
/*
* efi_pe_entry uses MS calling convention, which requires 32 bytes of
* shadow space on the stack even if all arguments are passed in
* registers. We also need an additional 8 bytes for the space that
* would be occupied by the return address, and this also results in
* the correct stack alignment for entry.
*/
sub $40, %rsp
mov %rdi, %rcx // MS calling convention
mov %rsi, %rdx
jmp efi_pe_entry
SYM_FUNC_END(startup_64_mixed_mode)
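
Rendered as rough C, the dispatch performed by startup_64_mixed_mode() above amounts to the sketch below (illustrative only; the struct layout mirrors the three 32-bit slots of efi32_boot_args, and the *_sketch names are placeholders rather than kernel symbols):

	#include <stdint.h>

	/* The three 32-bit slots stored in efi32_boot_args */
	struct efi32_boot_args_sketch {
		uint32_t image_handle;	/* slot 0 */
		uint32_t sys_table;	/* slot 4 */
		uint32_t bootparams;	/* slot 8: non-zero only for the handover protocol */
	};

	extern struct efi32_boot_args_sketch efi32_boot_args_sketch;
	extern void efi64_stub_entry_sketch(uint32_t handle, uint32_t systab, uint32_t bp);
	extern void efi_pe_entry_sketch(uint32_t handle, uint32_t systab);

	static void startup_64_mixed_mode_sketch(void)
	{
		struct efi32_boot_args_sketch *a = &efi32_boot_args_sketch;

		if (a->bootparams)	/* handover protocol: third argument provided */
			efi64_stub_entry_sketch(a->image_handle, a->sys_table, a->bootparams);
		else			/* PE/COFF entry: MS calling convention, shadow space set up */
			efi_pe_entry_sketch(a->image_handle, a->sys_table);
	}
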
SYM_FUNC_START(__efi64_thunk)
push %rbp
push %rbx
......@@ -53,24 +96,20 @@ SYM_FUNC_START(__efi64_thunk)
leaq 0x20(%rsp), %rbx
sgdt (%rbx)
addq $16, %rbx
sidt (%rbx)
sidt 16(%rbx)
leaq 1f(%rip), %rbp
/*
* Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
* and IDT that was installed when the kernel started executing. The
* pointers were saved at the EFI stub entry point in head_64.S.
* Switch to IDT and GDT with 32-bit segments. These are the firmware
* GDT and IDT that were installed when the kernel started executing.
* The pointers were saved by the efi32_entry() routine below.
*
* Pass the saved DS selector to the 32-bit code, and use far return to
* restore the saved CS selector.
*/
leaq efi32_boot_idt(%rip), %rax
lidt (%rax)
leaq efi32_boot_gdt(%rip), %rax
lgdt (%rax)
lidt efi32_boot_idt(%rip)
lgdt efi32_boot_gdt(%rip)
movzwl efi32_boot_ds(%rip), %edx
movzwq efi32_boot_cs(%rip), %rax
......@@ -138,9 +177,7 @@ SYM_FUNC_START_LOCAL(efi_enter32)
*/
cli
lidtl (%ebx)
subl $16, %ebx
lidtl 16(%ebx)
lgdtl (%ebx)
movl %cr4, %eax
......@@ -168,22 +205,141 @@ SYM_FUNC_START_LOCAL(efi_enter32)
lret
SYM_FUNC_END(efi_enter32)
/*
* This is the common EFI stub entry point for mixed mode.
*
* Arguments: %ecx image handle
* %edx EFI system table pointer
* %esi struct bootparams pointer (or NULL when not using
* the EFI handover protocol)
*
* Since this is the point of no return for ordinary execution, no registers
* are considered live except for the function parameters. [Note that the EFI
* stub may still exit and return to the firmware using the Exit() EFI boot
* service.]
*/
SYM_FUNC_START(efi32_entry)
call 1f
1: pop %ebx
/* Save firmware GDTR and code/data selectors */
sgdtl (efi32_boot_gdt - 1b)(%ebx)
movw %cs, (efi32_boot_cs - 1b)(%ebx)
movw %ds, (efi32_boot_ds - 1b)(%ebx)
/* Store firmware IDT descriptor */
sidtl (efi32_boot_idt - 1b)(%ebx)
/* Store boot arguments */
leal (efi32_boot_args - 1b)(%ebx), %ebx
movl %ecx, 0(%ebx)
movl %edx, 4(%ebx)
movl %esi, 8(%ebx)
movb $0x0, 12(%ebx) // efi_is64
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
jmp startup_32
SYM_FUNC_END(efi32_entry)
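
The snapshot taken by efi32_entry() above is what __efi64_thunk() reloads before calling back into 32-bit firmware code. As a rough C picture of that saved state (an illustrative layout only; the real storage is the SYM_DATA items at the end of this file):

	#include <stdint.h>

	struct efi32_saved_state_sketch {
		struct {
			uint16_t limit;
			uint32_t base;
		} __attribute__((packed)) gdt, idt;	/* efi32_boot_gdt / efi32_boot_idt */
		uint16_t cs, ds;			/* efi32_boot_cs / efi32_boot_ds */
		uint32_t args[3];			/* image handle, system table, bootparams */
		uint8_t  is64;				/* efi_is64, cleared to 0 here */
	};

When a 32-bit EFI service must be invoked later, the thunk loads gdt/idt via lgdt/lidt, passes ds along, and far-returns to the saved cs, as shown in __efi64_thunk() earlier in this file.
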
#define ST32_boottime 60 // offsetof(efi_system_table_32_t, boottime)
#define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol)
#define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base)
/*
* efi_status_t efi32_pe_entry(efi_handle_t image_handle,
* efi_system_table_32_t *sys_table)
*/
SYM_FUNC_START(efi32_pe_entry)
pushl %ebp
movl %esp, %ebp
pushl %eax // dummy push to allocate loaded_image
pushl %ebx // save callee-save registers
pushl %edi
call verify_cpu // check for long mode support
testl %eax, %eax
movl $0x80000003, %eax // EFI_UNSUPPORTED
jnz 2f
call 1f
1: pop %ebx
/* Get the loaded image protocol pointer from the image handle */
leal -4(%ebp), %eax
pushl %eax // &loaded_image
leal (loaded_image_proto - 1b)(%ebx), %eax
pushl %eax // pass the GUID address
pushl 8(%ebp) // pass the image handle
/*
* Note the alignment of the stack frame.
* sys_table
* handle <-- 16-byte aligned on entry by ABI
* return address
* frame pointer
* loaded_image <-- local variable
* saved %ebx <-- 16-byte aligned here
* saved %edi
* &loaded_image
* &loaded_image_proto
* handle <-- 16-byte aligned for call to handle_protocol
*/
movl 12(%ebp), %eax // sys_table
movl ST32_boottime(%eax), %eax // sys_table->boottime
call *BS32_handle_protocol(%eax) // sys_table->boottime->handle_protocol
addl $12, %esp // restore argument space
testl %eax, %eax
jnz 2f
movl 8(%ebp), %ecx // image_handle
movl 12(%ebp), %edx // sys_table
movl -4(%ebp), %esi // loaded_image
movl LI32_image_base(%esi), %esi // loaded_image->image_base
leal (startup_32 - 1b)(%ebx), %ebp // runtime address of startup_32
/*
* We need to set the image_offset variable here since startup_32() will
* use it before we get to the 64-bit efi_pe_entry() in C code.
*/
subl %esi, %ebp // calculate image_offset
movl %ebp, (image_offset - 1b)(%ebx) // save image_offset
xorl %esi, %esi
jmp efi32_entry // pass %ecx, %edx, %esi
// no other registers remain live
2: popl %edi // restore callee-save registers
popl %ebx
leave
RET
SYM_FUNC_END(efi32_pe_entry)
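
The image_offset computation at the end of efi32_pe_entry() boils down to one subtraction; roughly, in C (a sketch with hypothetical parameter names for the two addresses involved):

	#include <stdint.h>

	extern uint32_t image_offset;	/* consumed by startup_32 before efi_pe_entry() runs */

	/*
	 * startup_32_runtime: the address startup_32 ended up at (from the call/pop trick above);
	 * image_base: loaded_image->image_base from the LOADED_IMAGE protocol.
	 */
	static void set_image_offset_sketch(uint32_t startup_32_runtime, uint32_t image_base)
	{
		image_offset = startup_32_runtime - image_base;
	}
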
.section ".rodata"
/* EFI loaded image protocol GUID */
.balign 4
SYM_DATA_START_LOCAL(loaded_image_proto)
.long 0x5b1b31a1
.word 0x9562, 0x11d2
.byte 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
SYM_DATA_END(loaded_image_proto)
.data
.balign 8
SYM_DATA_START(efi32_boot_gdt)
SYM_DATA_START_LOCAL(efi32_boot_gdt)
.word 0
.quad 0
SYM_DATA_END(efi32_boot_gdt)
SYM_DATA_START(efi32_boot_idt)
SYM_DATA_START_LOCAL(efi32_boot_idt)
.word 0
.quad 0
SYM_DATA_END(efi32_boot_idt)
SYM_DATA_START(efi32_boot_cs)
.word 0
SYM_DATA_END(efi32_boot_cs)
SYM_DATA_START(efi32_boot_ds)
.word 0
SYM_DATA_END(efi32_boot_ds)
SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)
......@@ -208,10 +208,6 @@ SYM_DATA_START_LOCAL(gdt)
.quad 0x00cf92000000ffff /* __KERNEL_DS */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif
/*
* Stack and heap for uncompression
*/
......
......@@ -118,7 +118,9 @@ SYM_FUNC_START(startup_32)
1:
/* Setup Exception handling for SEV-ES */
#ifdef CONFIG_AMD_MEM_ENCRYPT
call startup32_load_idt
#endif
/* Make sure cpu supports long mode. */
call verify_cpu
......@@ -178,12 +180,13 @@ SYM_FUNC_START(startup_32)
*/
/*
* If SEV is active then set the encryption mask in the page tables.
* This will insure that when the kernel is copied and decompressed
* This will ensure that when the kernel is copied and decompressed
* it will be done so encrypted.
*/
call get_sev_encryption_bit
xorl %edx, %edx
#ifdef CONFIG_AMD_MEM_ENCRYPT
call get_sev_encryption_bit
xorl %edx, %edx
testl %eax, %eax
jz 1f
subl $32, %eax /* Encryption bit is always above bit 31 */
......@@ -249,6 +252,11 @@ SYM_FUNC_START(startup_32)
movl $__BOOT_TSS, %eax
ltr %ax
#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Check if the C-bit position is correct when SEV is active */
call startup32_check_sev_cbit
#endif
/*
* Setup for the jump to 64bit mode
*
......@@ -261,29 +269,11 @@ SYM_FUNC_START(startup_32)
*/
leal rva(startup_64)(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
movl rva(efi32_boot_args)(%ebp), %edi
testl %edi, %edi
jz 1f
leal rva(efi64_stub_entry)(%ebp), %eax
movl rva(efi32_boot_args+4)(%ebp), %esi
movl rva(efi32_boot_args+8)(%ebp), %edx // saved bootparams pointer
testl %edx, %edx
jnz 1f
/*
* efi_pe_entry uses MS calling convention, which requires 32 bytes of
* shadow space on the stack even if all arguments are passed in
* registers. We also need an additional 8 bytes for the space that
* would be occupied by the return address, and this also results in
* the correct stack alignment for entry.
*/
subl $40, %esp
leal rva(efi_pe_entry)(%ebp), %eax
movl %edi, %ecx // MS calling convention
movl %esi, %edx
cmpb $1, rva(efi_is64)(%ebp)
je 1f
leal rva(startup_64_mixed_mode)(%ebp), %eax
1:
#endif
/* Check if the C-bit position is correct when SEV is active */
call startup32_check_sev_cbit
pushl $__KERNEL_CS
pushl %eax
......@@ -296,38 +286,14 @@ SYM_FUNC_START(startup_32)
lret
SYM_FUNC_END(startup_32)
#ifdef CONFIG_EFI_MIXED
#if IS_ENABLED(CONFIG_EFI_MIXED) && IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL)
.org 0x190
SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp /* Discard return address */
popl %ecx
popl %edx
popl %esi
call 1f
1: pop %ebp
subl $ rva(1b), %ebp
movl %esi, rva(efi32_boot_args+8)(%ebp)
SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
movl %ecx, rva(efi32_boot_args)(%ebp)
movl %edx, rva(efi32_boot_args+4)(%ebp)
movb $0, rva(efi_is64)(%ebp)
/* Save firmware GDTR and code/data selectors */
sgdtl rva(efi32_boot_gdt)(%ebp)
movw %cs, rva(efi32_boot_cs)(%ebp)
movw %ds, rva(efi32_boot_ds)(%ebp)
/* Store firmware IDT descriptor */
sidtl rva(efi32_boot_idt)(%ebp)
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
jmp startup_32
jmp efi32_entry
SYM_FUNC_END(efi32_stub_entry)
#endif
......@@ -550,7 +516,9 @@ trampoline_return:
SYM_CODE_END(startup_64)
#ifdef CONFIG_EFI_STUB
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
.org 0x390
#endif
SYM_FUNC_START(efi64_stub_entry)
and $~0xf, %rsp /* realign the stack */
movq %rdx, %rbx /* save boot_params pointer */
......@@ -713,6 +681,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
jmp 1b
SYM_FUNC_END(.Lno_longmode)
.globl verify_cpu
#include "../../kernel/verify_cpu.S"
.data
......@@ -744,242 +713,6 @@ SYM_DATA_START(boot_idt)
.endr
SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA_START(boot32_idt_desc)
.word boot32_idt_end - boot32_idt - 1
.long 0
SYM_DATA_END(boot32_idt_desc)
.balign 8
SYM_DATA_START(boot32_idt)
.rept 32
.quad 0
.endr
SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end)
#endif
#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif
#ifdef CONFIG_EFI_MIXED
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)
#define ST32_boottime 60 // offsetof(efi_system_table_32_t, boottime)
#define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol)
#define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base)
__HEAD
.code32
SYM_FUNC_START(efi32_pe_entry)
/*
* efi_status_t efi32_pe_entry(efi_handle_t image_handle,
* efi_system_table_32_t *sys_table)
*/
pushl %ebp
movl %esp, %ebp
pushl %eax // dummy push to allocate loaded_image
pushl %ebx // save callee-save registers
pushl %edi
call verify_cpu // check for long mode support
testl %eax, %eax
movl $0x80000003, %eax // EFI_UNSUPPORTED
jnz 2f
call 1f
1: pop %ebx
subl $ rva(1b), %ebx
/* Get the loaded image protocol pointer from the image handle */
leal -4(%ebp), %eax
pushl %eax // &loaded_image
leal rva(loaded_image_proto)(%ebx), %eax
pushl %eax // pass the GUID address
pushl 8(%ebp) // pass the image handle
/*
* Note the alignment of the stack frame.
* sys_table
* handle <-- 16-byte aligned on entry by ABI
* return address
* frame pointer
* loaded_image <-- local variable
* saved %ebx <-- 16-byte aligned here
* saved %edi
* &loaded_image
* &loaded_image_proto
* handle <-- 16-byte aligned for call to handle_protocol
*/
movl 12(%ebp), %eax // sys_table
movl ST32_boottime(%eax), %eax // sys_table->boottime
call *BS32_handle_protocol(%eax) // sys_table->boottime->handle_protocol
addl $12, %esp // restore argument space
testl %eax, %eax
jnz 2f
movl 8(%ebp), %ecx // image_handle
movl 12(%ebp), %edx // sys_table
movl -4(%ebp), %esi // loaded_image
movl LI32_image_base(%esi), %esi // loaded_image->image_base
movl %ebx, %ebp // startup_32 for efi32_pe_stub_entry
/*
* We need to set the image_offset variable here since startup_32() will
* use it before we get to the 64-bit efi_pe_entry() in C code.
*/
subl %esi, %ebx
movl %ebx, rva(image_offset)(%ebp) // save image_offset
jmp efi32_pe_stub_entry
2: popl %edi // restore callee-save registers
popl %ebx
leave
RET
SYM_FUNC_END(efi32_pe_entry)
.section ".rodata"
/* EFI loaded image protocol GUID */
.balign 4
SYM_DATA_START_LOCAL(loaded_image_proto)
.long 0x5b1b31a1
.word 0x9562, 0x11d2
.byte 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
SYM_DATA_END(loaded_image_proto)
#endif
#ifdef CONFIG_AMD_MEM_ENCRYPT
__HEAD
.code32
/*
* Write an IDT entry into boot32_idt
*
* Parameters:
*
* %eax: Handler address
* %edx: Vector number
*
* Physical offset is expected in %ebp
*/
SYM_FUNC_START(startup32_set_idt_entry)
push %ebx
push %ecx
/* IDT entry address to %ebx */
leal rva(boot32_idt)(%ebp), %ebx
shl $3, %edx
addl %edx, %ebx
/* Build IDT entry, lower 4 bytes */
movl %eax, %edx
andl $0x0000ffff, %edx # Target code segment offset [15:0]
movl $__KERNEL32_CS, %ecx # Target code segment selector
shl $16, %ecx
orl %ecx, %edx
/* Store lower 4 bytes to IDT */
movl %edx, (%ebx)
/* Build IDT entry, upper 4 bytes */
movl %eax, %edx
andl $0xffff0000, %edx # Target code segment offset [31:16]
orl $0x00008e00, %edx # Present, Type 32-bit Interrupt Gate
/* Store upper 4 bytes to IDT */
movl %edx, 4(%ebx)
pop %ecx
pop %ebx
RET
SYM_FUNC_END(startup32_set_idt_entry)
#endif
SYM_FUNC_START(startup32_load_idt)
#ifdef CONFIG_AMD_MEM_ENCRYPT
/* #VC handler */
leal rva(startup32_vc_handler)(%ebp), %eax
movl $X86_TRAP_VC, %edx
call startup32_set_idt_entry
/* Load IDT */
leal rva(boot32_idt)(%ebp), %eax
movl %eax, rva(boot32_idt_desc+2)(%ebp)
lidt rva(boot32_idt_desc)(%ebp)
#endif
RET
SYM_FUNC_END(startup32_load_idt)
/*
* Check for the correct C-bit position when the startup_32 boot-path is used.
*
* The check makes use of the fact that all memory is encrypted when paging is
* disabled. The function creates 64 bits of random data using the RDRAND
* instruction. RDRAND is mandatory for SEV guests, so always available. If the
* hypervisor violates that the kernel will crash right here.
*
* The 64 bits of random data are stored to a memory location and at the same
* time kept in the %eax and %ebx registers. Since encryption is always active
* when paging is off the random data will be stored encrypted in main memory.
*
* Then paging is enabled. When the C-bit position is correct all memory is
* still mapped encrypted and comparing the register values with memory will
* succeed. An incorrect C-bit position will map all memory unencrypted, so that
* the compare will use the encrypted random data and fail.
*/
SYM_FUNC_START(startup32_check_sev_cbit)
#ifdef CONFIG_AMD_MEM_ENCRYPT
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
/* Check for non-zero sev_status */
movl rva(sev_status)(%ebp), %eax
testl %eax, %eax
jz 4f
/*
* Get two 32-bit random values - Don't bail out if RDRAND fails
* because it is better to prevent forward progress if no random value
* can be gathered.
*/
1: rdrand %eax
jnc 1b
2: rdrand %ebx
jnc 2b
/* Store to memory and keep it in the registers */
movl %eax, rva(sev_check_data)(%ebp)
movl %ebx, rva(sev_check_data+4)(%ebp)
/* Enable paging to see if encryption is active */
movl %cr0, %edx /* Backup %cr0 in %edx */
movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
movl %ecx, %cr0
cmpl %eax, rva(sev_check_data)(%ebp)
jne 3f
cmpl %ebx, rva(sev_check_data+4)(%ebp)
jne 3f
movl %edx, %cr0 /* Restore previous %cr0 */
jmp 4f
3: /* Check failed - hlt the machine */
hlt
jmp 3b
4:
popl %edx
popl %ecx
popl %ebx
popl %eax
#endif
RET
SYM_FUNC_END(startup32_check_sev_cbit)
/*
* Stack and heap for uncompression
*/
......
......@@ -12,16 +12,13 @@
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>
#include <asm/segment.h>
#include <asm/trapnr.h>
.text
.code32
SYM_FUNC_START(get_sev_encryption_bit)
xor %eax, %eax
#ifdef CONFIG_AMD_MEM_ENCRYPT
push %ebx
push %ecx
push %edx
movl $0x80000000, %eax /* CPUID to check the highest leaf */
cpuid
......@@ -52,12 +49,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
xor %eax, %eax
.Lsev_exit:
pop %edx
pop %ecx
pop %ebx
#endif /* CONFIG_AMD_MEM_ENCRYPT */
RET
SYM_FUNC_END(get_sev_encryption_bit)
......@@ -98,7 +90,7 @@ SYM_CODE_START_LOCAL(sev_es_req_cpuid)
jmp 1b
SYM_CODE_END(sev_es_req_cpuid)
SYM_CODE_START(startup32_vc_handler)
SYM_CODE_START_LOCAL(startup32_vc_handler)
pushl %eax
pushl %ebx
pushl %ecx
......@@ -184,15 +176,149 @@ SYM_CODE_START(startup32_vc_handler)
jmp .Lfail
SYM_CODE_END(startup32_vc_handler)
/*
* Write an IDT entry into boot32_idt
*
* Parameters:
*
* %eax: Handler address
* %edx: Vector number
* %ecx: IDT address
*/
SYM_FUNC_START_LOCAL(startup32_set_idt_entry)
/* IDT entry address to %ecx */
leal (%ecx, %edx, 8), %ecx
/* Build IDT entry, lower 4 bytes */
movl %eax, %edx
andl $0x0000ffff, %edx # Target code segment offset [15:0]
orl $(__KERNEL32_CS << 16), %edx # Target code segment selector
/* Store lower 4 bytes to IDT */
movl %edx, (%ecx)
/* Build IDT entry, upper 4 bytes */
movl %eax, %edx
andl $0xffff0000, %edx # Target code segment offset [31:16]
orl $0x00008e00, %edx # Present, Type 32-bit Interrupt Gate
/* Store upper 4 bytes to IDT */
movl %edx, 4(%ecx)
RET
SYM_FUNC_END(startup32_set_idt_entry)
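
The two 32-bit words stored above form a standard 32-bit interrupt gate. The same encoding in C (a sketch; the selector value is a placeholder for the real __KERNEL32_CS):

	#include <stdint.h>

	#define KERNEL32_CS_SKETCH	0x18	/* placeholder selector, for illustration only */

	struct idt_gate32_sketch {
		uint32_t lo;	/* offset[15:0] | (selector << 16) */
		uint32_t hi;	/* offset[31:16] in the top half | 0x8e00 (present, 32-bit interrupt gate) */
	};

	static void set_idt_entry_sketch(struct idt_gate32_sketch *idt,
					 unsigned int vector, uint32_t handler)
	{
		idt[vector].lo = (handler & 0x0000ffffu) | (KERNEL32_CS_SKETCH << 16);
		idt[vector].hi = (handler & 0xffff0000u) | 0x00008e00u;
	}
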
SYM_FUNC_START(startup32_load_idt)
push %ebp
push %ebx
call 1f
1: pop %ebp
leal (boot32_idt - 1b)(%ebp), %ebx
/* #VC handler */
leal (startup32_vc_handler - 1b)(%ebp), %eax
movl $X86_TRAP_VC, %edx
movl %ebx, %ecx
call startup32_set_idt_entry
/* Load IDT */
leal (boot32_idt_desc - 1b)(%ebp), %ecx
movl %ebx, 2(%ecx)
lidt (%ecx)
pop %ebx
pop %ebp
RET
SYM_FUNC_END(startup32_load_idt)
/*
* Check for the correct C-bit position when the startup_32 boot-path is used.
*
* The check makes use of the fact that all memory is encrypted when paging is
* disabled. The function creates 64 bits of random data using the RDRAND
* instruction. RDRAND is mandatory for SEV guests, so always available. If the
* hypervisor violates that the kernel will crash right here.
*
* The 64 bits of random data are stored to a memory location and at the same
* time kept in the %eax and %ebx registers. Since encryption is always active
* when paging is off the random data will be stored encrypted in main memory.
*
* Then paging is enabled. When the C-bit position is correct all memory is
* still mapped encrypted and comparing the register values with memory will
* succeed. An incorrect C-bit position will map all memory unencrypted, so that
* the compare will use the encrypted random data and fail.
*/
SYM_FUNC_START(startup32_check_sev_cbit)
pushl %ebx
pushl %ebp
call 0f
0: popl %ebp
/* Check for non-zero sev_status */
movl (sev_status - 0b)(%ebp), %eax
testl %eax, %eax
jz 4f
/*
* Get two 32-bit random values - Don't bail out if RDRAND fails
* because it is better to prevent forward progress if no random value
* can be gathered.
*/
1: rdrand %eax
jnc 1b
2: rdrand %ebx
jnc 2b
/* Store to memory and keep it in the registers */
leal (sev_check_data - 0b)(%ebp), %ebp
movl %eax, 0(%ebp)
movl %ebx, 4(%ebp)
/* Enable paging to see if encryption is active */
movl %cr0, %edx /* Backup %cr0 in %edx */
movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
movl %ecx, %cr0
cmpl %eax, 0(%ebp)
jne 3f
cmpl %ebx, 4(%ebp)
jne 3f
movl %edx, %cr0 /* Restore previous %cr0 */
jmp 4f
3: /* Check failed - hlt the machine */
hlt
jmp 3b
4:
popl %ebp
popl %ebx
RET
SYM_FUNC_END(startup32_check_sev_cbit)
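
Expressed in C, the check above does roughly the following (a sketch; rdrand32(), read_cr0() and write_cr0() are hypothetical stand-ins for the RDRAND and %cr0 instructions, not boot-stub helpers):

	#include <stdint.h>

	#define CR0_PE	(1UL << 0)
	#define CR0_PG	(1UL << 31)

	extern uint64_t sev_status;
	extern uint32_t sev_check_data[2];

	extern uint32_t rdrand32(void);		/* stand-in for the rdrand retry loops */
	extern unsigned long read_cr0(void);	/* stand-ins for mov to/from %cr0 */
	extern void write_cr0(unsigned long val);

	static void check_sev_cbit_sketch(void)
	{
		uint32_t lo, hi;
		unsigned long saved_cr0;

		if (!sev_status)
			return;

		/* Paging is off, so these stores reach memory encrypted on SEV */
		lo = rdrand32();
		hi = rdrand32();
		sev_check_data[0] = lo;
		sev_check_data[1] = hi;

		/* Enable paging: a wrong C-bit position maps memory unencrypted */
		saved_cr0 = read_cr0();
		write_cr0(CR0_PG | CR0_PE);

		if (sev_check_data[0] != lo || sev_check_data[1] != hi)
			for (;;)
				;	/* mismatch: halt, like the hlt loop above */

		write_cr0(saved_cr0);
	}
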
.code64
#include "../../kernel/sev_verify_cbit.S"
.data
#ifdef CONFIG_AMD_MEM_ENCRYPT
.balign 8
SYM_DATA(sme_me_mask, .quad 0)
SYM_DATA(sev_status, .quad 0)
SYM_DATA(sev_check_data, .quad 0)
#endif
SYM_DATA_START_LOCAL(boot32_idt)
.rept 32
.quad 0
.endr
SYM_DATA_END(boot32_idt)
SYM_DATA_START_LOCAL(boot32_idt_desc)
.word . - boot32_idt - 1
.long 0
SYM_DATA_END(boot32_idt_desc)
......@@ -64,20 +64,11 @@ int has_eflag(unsigned long mask)
return !!((f0^f1) & mask);
}
/* Handle x86_32 PIC using ebx. */
#if defined(__i386__) && defined(__PIC__)
# define EBX_REG "=r"
#else
# define EBX_REG "=b"
#endif
void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d)
{
asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t"
"cpuid \n\t"
".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t"
: "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
: "a" (id), "c" (count)
asm volatile("cpuid"
: "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
: "0" (id), "2" (count)
);
}
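
With the PIC workaround removed, cpuid_count() is a plain constraint-based wrapper. A minimal usage sketch (leaf 0 returns the highest supported basic CPUID leaf in EAX):

	#include <stdint.h>

	typedef uint32_t u32;

	void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d);

	static u32 max_basic_cpuid_leaf(void)
	{
		u32 a, b, c, d;

		cpuid_count(0, 0, &a, &b, &c, &d);	/* leaf 0, subleaf 0 */
		return a;
	}
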
......
......@@ -407,7 +407,7 @@ xloadflags:
# define XLF1 0
#endif
#ifdef CONFIG_EFI_STUB
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
# ifdef CONFIG_EFI_MIXED
# define XLF23 (XLF_EFI_HANDOVER_32|XLF_EFI_HANDOVER_64)
# else
......
......@@ -290,6 +290,7 @@ static void efi_stub_entry_update(void)
{
unsigned long addr = efi32_stub_entry;
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
#ifdef CONFIG_X86_64
/* Yes, this is really how we defined it :( */
addr = efi64_stub_entry - 0x200;
......@@ -298,6 +299,7 @@ static void efi_stub_entry_update(void)
#ifdef CONFIG_EFI_MIXED
if (efi32_stub_entry != addr)
die("32-bit and 64-bit EFI entry points do not match\n");
#endif
#endif
put_unaligned_le32(addr, &buf[0x264]);
}
......
......@@ -91,6 +91,7 @@ static inline void set_real_mode_mem(phys_addr_t mem)
void reserve_real_mode(void);
void load_trampoline_pgtable(void);
void init_real_mode(void);
#endif /* __ASSEMBLY__ */
......
......@@ -285,6 +285,8 @@ struct x86_hyper_runtime {
* possible in x86_early_init_platform_quirks() by
* only using the current x86_hardware_subarch
* semantics.
* @realmode_reserve: reserve memory for realmode trampoline
* @realmode_init: initialize realmode trampoline
* @hyper: x86 hypervisor specific runtime callbacks
*/
struct x86_platform_ops {
......@@ -301,6 +303,8 @@ struct x86_platform_ops {
void (*apic_post_init)(void);
struct x86_legacy_features legacy;
void (*set_legacy_features)(void);
void (*realmode_reserve)(void);
void (*realmode_init)(void);
struct x86_hyper_runtime hyper;
struct x86_guest guest;
};
......
......@@ -1176,7 +1176,7 @@ void __init setup_arch(char **cmdline_p)
* Moreover, on machines with SandyBridge graphics or in setups that use
* crashkernel the entire 1M is reserved anyway.
*/
reserve_real_mode();
x86_platform.realmode_reserve();
init_mem_mapping();
......
......@@ -25,6 +25,7 @@
#include <asm/iommu.h>
#include <asm/mach_traps.h>
#include <asm/irqdomain.h>
#include <asm/realmode.h>
void x86_init_noop(void) { }
void __init x86_init_uint_noop(unsigned int unused) { }
......@@ -145,6 +146,8 @@ struct x86_platform_ops x86_platform __ro_after_init = {
.get_nmi_reason = default_get_nmi_reason,
.save_sched_clock_state = tsc_save_sched_clock_state,
.restore_sched_clock_state = tsc_restore_sched_clock_state,
.realmode_reserve = reserve_real_mode,
.realmode_init = init_real_mode,
.hyper.pin_vcpu = x86_op_int_noop,
.guest = {
......
......@@ -200,14 +200,18 @@ static void __init set_real_mode_permissions(void)
set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}
static int __init init_real_mode(void)
void __init init_real_mode(void)
{
if (!real_mode_header)
panic("Real mode trampoline was not allocated");
setup_real_mode();
set_real_mode_permissions();
}
static int __init do_init_real_mode(void)
{
x86_platform.realmode_init();
return 0;
}
early_initcall(init_real_mode);
early_initcall(do_init_real_mode);
......@@ -1266,6 +1266,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
xen_vcpu_info_reset(0);
x86_platform.get_nmi_reason = xen_get_nmi_reason;
x86_platform.realmode_reserve = x86_init_noop;
x86_platform.realmode_init = x86_init_noop;
x86_init.resources.memory_setup = xen_memory_setup;
x86_init.irqs.intr_mode_select = x86_init_noop;
......
......@@ -23,7 +23,7 @@
const efi_system_table_t *efi_system_table;
const efi_dxe_services_table_t *efi_dxe_table;
extern u32 image_offset;
u32 image_offset __section(".data");
static efi_loaded_image_t *image = NULL;
static efi_status_t
......