Commit 9cf30606 authored by Kirill A. Shutemov, committed by Dave Hansen

x86/boot: Set CR0.NE early and keep it set during the boot

TDX guests require CR0.NE to be set. Clearing the bit triggers #GP(0).

If CR0.NE is 0, the MS-DOS compatibility mode for handling floating-point
exceptions is selected. In this mode, the software exception handler for
floating-point exceptions is invoked externally using the processor’s
FERR#, INTR, and IGNNE# pins.

Using FERR# and IGNNE# to handle floating-point exceptions is deprecated.
CR0.NE=0 also limits newer processors to operating with only one logical
processor active.

The kernel uses the CR0_STATE constant to initialize CR0. It has the NE
bit set. But during early boot the kernel takes a more ad-hoc approach to
setting bits in the register. During some of this ad-hoc manipulation,
CR0.NE gets cleared. This causes a #GP in TDX guests and makes them die
in early boot.

Make CR0 initialization consistent, deriving the initial value of CR0
from CR0_STATE. Since CR0_STATE always has CR0.NE=1, this ensures that
CR0.NE is never 0 and avoids the #GP.
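
For reference, a minimal standalone C sketch of why deriving CR0 from
CR0_STATE is sufficient. The bit values and the CR0_STATE definition below
are copied from the kernel's processor-flags.h headers for illustration
only; the program simply checks that CR0.NE stays set both with paging
enabled (CR0_STATE) and with paging masked off, as the trampolines below do:

  #include <assert.h>
  #include <stdio.h>

  /* CR0 bits, mirroring arch/x86/include/uapi/asm/processor-flags.h */
  #define X86_CR0_PE 0x00000001UL /* Protection Enable */
  #define X86_CR0_MP 0x00000002UL /* Monitor Coprocessor */
  #define X86_CR0_ET 0x00000010UL /* Extension Type */
  #define X86_CR0_NE 0x00000020UL /* Numeric Error */
  #define X86_CR0_WP 0x00010000UL /* Write Protect */
  #define X86_CR0_AM 0x00040000UL /* Alignment Mask */
  #define X86_CR0_PG 0x80000000UL /* Paging */

  /* CR0_STATE as defined in arch/x86/include/asm/processor-flags.h */
  #define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
                     X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
                     X86_CR0_PG)

  int main(void)
  {
          /* Long mode / paging enabled: NE is set. */
          assert(CR0_STATE & X86_CR0_NE);

          /* Trampoline case: paging masked off, NE still set. */
          assert((CR0_STATE & ~X86_CR0_PG) & X86_CR0_NE);

          printf("CR0_STATE       = %#010lx\n", CR0_STATE);
          printf("CR0_STATE & ~PG = %#010lx\n", CR0_STATE & ~X86_CR0_PG);
          return 0;
  }

The old ad-hoc values (X86_CR0_PE alone, or X86_CR0_PG | X86_CR0_PE) leave
bit 5 (NE) clear, which is exactly what trips the #GP(0) in a TDX guest.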
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20220405232939.73860-23-kirill.shutemov@linux.intel.com
parent f39642d0
@@ -289,7 +289,7 @@ SYM_FUNC_START(startup_32)
 	pushl	%eax

 	/* Enter paged protected Mode, activating Long Mode */
-	movl	$(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
+	movl	$CR0_STATE, %eax
 	movl	%eax, %cr0

 	/* Jump from 32bit compatibility mode into 64bit mode. */
@@ -661,8 +661,9 @@ SYM_CODE_START(trampoline_32bit_src)
 	pushl	$__KERNEL_CS
 	pushl	%eax

-	/* Enable paging again */
-	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
+	/* Enable paging again. */
+	movl	%cr0, %eax
+	btsl	$X86_CR0_PG_BIT, %eax
 	movl	%eax, %cr0

 	lret
@@ -70,7 +70,7 @@ SYM_CODE_START(trampoline_start)
 	movw	$__KERNEL_DS, %dx	# Data segment descriptor

 	# Enable protected mode
-	movl	$X86_CR0_PE, %eax	# protected mode (PE) bit
+	movl	$(CR0_STATE & ~X86_CR0_PG), %eax
 	movl	%eax, %cr0		# into protected mode

 	# flush prefetch and jump to startup_32
@@ -148,8 +148,8 @@ SYM_CODE_START(startup_32)
 	movl	$MSR_EFER, %ecx
 	wrmsr

-	# Enable paging and in turn activate Long Mode
-	movl	$(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
+	# Enable paging and in turn activate Long Mode.
+	movl	$CR0_STATE, %eax
 	movl	%eax, %cr0

 	/*
@@ -169,7 +169,7 @@ SYM_CODE_START(pa_trampoline_compat)
 	movl	$rm_stack_end, %esp
 	movw	$__KERNEL_DS, %dx

-	movl	$X86_CR0_PE, %eax
+	movl	$(CR0_STATE & ~X86_CR0_PG), %eax
 	movl	%eax, %cr0
 	ljmpl	$__KERNEL32_CS, $pa_startup_32
 SYM_CODE_END(pa_trampoline_compat)