Commit 5b2fc515 authored by Peter Zijlstra

x86/ibt,xen: Sprinkle the ENDBR

Even though Xen currently doesn't advertise IBT, prepare for when it
will eventually do so and sprinkle the ENDBR dust accordingly.

Even though most of the entry points are IRET like, the CPL0
Hypervisor can set WAIT-FOR-ENDBR and demand ENDBR at these sites.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154317.873919996@infradead.org
parent 8b87d8ce
...@@ -809,6 +809,7 @@ SYM_CODE_END(exc_xen_hypervisor_callback) ...@@ -809,6 +809,7 @@ SYM_CODE_END(exc_xen_hypervisor_callback)
*/ */
SYM_CODE_START(xen_failsafe_callback) SYM_CODE_START(xen_failsafe_callback)
UNWIND_HINT_EMPTY UNWIND_HINT_EMPTY
ENDBR
movl %ds, %ecx movl %ds, %ecx
cmpw %cx, 0x10(%rsp) cmpw %cx, 0x10(%rsp)
jne 1f jne 1f
......
...@@ -283,7 +283,7 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node) ...@@ -283,7 +283,7 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
* pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
* max 8 bytes. * max 8 bytes.
*/ */
#define XEN_EARLY_IDT_HANDLER_SIZE 8 #define XEN_EARLY_IDT_HANDLER_SIZE (8 + ENDBR_INSN_SIZE)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
......
...@@ -383,6 +383,7 @@ SYM_CODE_START(early_idt_handler_array) ...@@ -383,6 +383,7 @@ SYM_CODE_START(early_idt_handler_array)
.endr .endr
UNWIND_HINT_IRET_REGS offset=16 UNWIND_HINT_IRET_REGS offset=16
SYM_CODE_END(early_idt_handler_array) SYM_CODE_END(early_idt_handler_array)
ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]
SYM_CODE_START_LOCAL(early_idt_handler_common) SYM_CODE_START_LOCAL(early_idt_handler_common)
/* /*
......
...@@ -624,6 +624,9 @@ static struct trap_array_entry trap_array[] = { ...@@ -624,6 +624,9 @@ static struct trap_array_entry trap_array[] = {
TRAP_ENTRY(exc_coprocessor_error, false ), TRAP_ENTRY(exc_coprocessor_error, false ),
TRAP_ENTRY(exc_alignment_check, false ), TRAP_ENTRY(exc_alignment_check, false ),
TRAP_ENTRY(exc_simd_coprocessor_error, false ), TRAP_ENTRY(exc_simd_coprocessor_error, false ),
#ifdef CONFIG_X86_KERNEL_IBT
TRAP_ENTRY(exc_control_protection, false ),
#endif
}; };
static bool __ref get_trap_addr(void **addr, unsigned int ist) static bool __ref get_trap_addr(void **addr, unsigned int ist)
......
...@@ -122,6 +122,7 @@ SYM_FUNC_END(xen_read_cr2_direct); ...@@ -122,6 +122,7 @@ SYM_FUNC_END(xen_read_cr2_direct);
.macro xen_pv_trap name .macro xen_pv_trap name
SYM_CODE_START(xen_\name) SYM_CODE_START(xen_\name)
UNWIND_HINT_EMPTY UNWIND_HINT_EMPTY
ENDBR
pop %rcx pop %rcx
pop %r11 pop %r11
jmp \name jmp \name
...@@ -147,6 +148,9 @@ xen_pv_trap asm_exc_page_fault ...@@ -147,6 +148,9 @@ xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_KERNEL_IBT
xen_pv_trap asm_exc_control_protection
#endif
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */ #endif /* CONFIG_X86_MCE */
...@@ -162,6 +166,7 @@ SYM_CODE_START(xen_early_idt_handler_array) ...@@ -162,6 +166,7 @@ SYM_CODE_START(xen_early_idt_handler_array)
i = 0 i = 0
.rept NUM_EXCEPTION_VECTORS .rept NUM_EXCEPTION_VECTORS
UNWIND_HINT_EMPTY UNWIND_HINT_EMPTY
ENDBR
pop %rcx pop %rcx
pop %r11 pop %r11
jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
...@@ -231,6 +236,7 @@ SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode) ...@@ -231,6 +236,7 @@ SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
/* Normal 64-bit system call target */ /* Normal 64-bit system call target */
SYM_CODE_START(xen_syscall_target) SYM_CODE_START(xen_syscall_target)
UNWIND_HINT_EMPTY UNWIND_HINT_EMPTY
ENDBR
popq %rcx popq %rcx
popq %r11 popq %r11
...@@ -250,6 +256,7 @@ SYM_CODE_END(xen_syscall_target) ...@@ -250,6 +256,7 @@ SYM_CODE_END(xen_syscall_target)
/* 32-bit compat syscall target */ /* 32-bit compat syscall target */
SYM_CODE_START(xen_syscall32_target) SYM_CODE_START(xen_syscall32_target)
UNWIND_HINT_EMPTY UNWIND_HINT_EMPTY
ENDBR
popq %rcx popq %rcx
popq %r11 popq %r11
...@@ -267,6 +274,7 @@ SYM_CODE_END(xen_syscall32_target) ...@@ -267,6 +274,7 @@ SYM_CODE_END(xen_syscall32_target)
/* 32-bit compat sysenter target */ /* 32-bit compat sysenter target */
SYM_CODE_START(xen_sysenter_target) SYM_CODE_START(xen_sysenter_target)
UNWIND_HINT_EMPTY UNWIND_HINT_EMPTY
ENDBR
/* /*
* NB: Xen is polite and clears TF from EFLAGS for us. This means * NB: Xen is polite and clears TF from EFLAGS for us. This means
* that we don't need to guard against single step exceptions here. * that we don't need to guard against single step exceptions here.
...@@ -290,6 +298,7 @@ SYM_CODE_END(xen_sysenter_target) ...@@ -290,6 +298,7 @@ SYM_CODE_END(xen_sysenter_target)
SYM_CODE_START(xen_syscall32_target) SYM_CODE_START(xen_syscall32_target)
SYM_CODE_START(xen_sysenter_target) SYM_CODE_START(xen_sysenter_target)
UNWIND_HINT_EMPTY UNWIND_HINT_EMPTY
ENDBR
lea 16(%rsp), %rsp /* strip %rcx, %r11 */ lea 16(%rsp), %rsp /* strip %rcx, %r11 */
mov $-ENOSYS, %rax mov $-ENOSYS, %rax
pushq $0 pushq $0
......
...@@ -25,8 +25,12 @@ ...@@ -25,8 +25,12 @@
SYM_CODE_START(hypercall_page) SYM_CODE_START(hypercall_page)
.rept (PAGE_SIZE / 32) .rept (PAGE_SIZE / 32)
UNWIND_HINT_FUNC UNWIND_HINT_FUNC
.skip 31, 0x90 ANNOTATE_NOENDBR
RET ret
/*
* Xen will write the hypercall page, and sort out ENDBR.
*/
.skip 31, 0xcc
.endr .endr
#define HYPERCALL(n) \ #define HYPERCALL(n) \
...@@ -74,6 +78,7 @@ SYM_CODE_END(startup_xen) ...@@ -74,6 +78,7 @@ SYM_CODE_END(startup_xen)
.pushsection .text .pushsection .text
SYM_CODE_START(asm_cpu_bringup_and_idle) SYM_CODE_START(asm_cpu_bringup_and_idle)
UNWIND_HINT_EMPTY UNWIND_HINT_EMPTY
ENDBR
call cpu_bringup_and_idle call cpu_bringup_and_idle
SYM_CODE_END(asm_cpu_bringup_and_idle) SYM_CODE_END(asm_cpu_bringup_and_idle)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment