Commit 125cfa0d authored by Linus Torvalds

Merge tag 'x86-entry-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 conversion to generic entry code from Thomas Gleixner:
 "The conversion of X86 syscall, interrupt and exception entry/exit
  handling to the generic code.

  Pretty much a straight-forward 1:1 conversion plus the consolidation
  of the KVM handling of pending work before entering guest mode"

* tag 'x86-entry-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kvm: Use __xfer_to_guest_mode_work_pending() in kvm_run_vcpu()
  x86/kvm: Use generic xfer to guest work function
  x86/entry: Cleanup idtentry_enter/exit
  x86/entry: Use generic interrupt entry/exit code
  x86/entry: Cleanup idtentry_entry/exit_user
  x86/entry: Use generic syscall exit functionality
  x86/entry: Use generic syscall entry function
  x86/ptrace: Provide pt_regs helper for entry/exit
  x86/entry: Move user return notifier out of loop
  x86/entry: Consolidate 32/64 bit syscall entry
  x86/entry: Consolidate check_user_regs()
  x86: Correct noinstr qualifiers
  x86/idtentry: Remove stale comment
parents 3f0d6ecd adb334d1
@@ -115,6 +115,7 @@ config X86
	select GENERIC_CPU_AUTOPROBE
	select GENERIC_CPU_VULNERABILITIES
	select GENERIC_EARLY_IOREMAP
+	select GENERIC_ENTRY
	select GENERIC_FIND_FIRST_BIT
	select GENERIC_IOMAP
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK	if SMP
......
This diff is collapsed.
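
The collapsed diff holds the bulk of the C-level conversion. For orientation, here is a minimal sketch of the converted 64-bit syscall path built on the generic helpers this series introduces (syscall_enter_from_user_mode() / syscall_exit_to_user_mode(), the latter visible in the assembly hunks below); the structure and locals are illustrative, not a copy of the patched arch/x86/entry/common.c.

/*
 * Sketch only: 64-bit syscall entry on top of the generic entry code.
 * syscall_enter_from_user_mode() performs the entry work (ptrace, seccomp,
 * audit), syscall_exit_to_user_mode() performs the exit work and the
 * transition back to user mode.
 */
__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	nr = syscall_enter_from_user_mode(regs, nr);

	instrumentation_begin();
	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
	}
	instrumentation_end();

	syscall_exit_to_user_mode(regs);
}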
@@ -846,7 +846,7 @@ SYM_CODE_START(ret_from_fork)
2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
-	call	syscall_return_slowpath
+	call	syscall_exit_to_user_mode
	jmp	.Lsyscall_32_done

	/* kernel thread */
......
@@ -283,7 +283,7 @@ SYM_CODE_START(ret_from_fork)
2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
-	call	syscall_return_slowpath		/* returns with IRQs disabled */
+	call	syscall_exit_to_user_mode	/* returns with IRQs disabled */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_ENTRY_COMMON_H
#define _ASM_X86_ENTRY_COMMON_H

#include <linux/user-return-notifier.h>

#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/fpu/api.h>

/* Check that the stack and regs on entry from user mode are sane. */
static __always_inline void arch_check_user_regs(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
		/*
		 * Make sure that the entry code gave us a sensible EFLAGS
		 * register. Native because we want to check the actual CPU
		 * state, not the interrupt state as imagined by Xen.
		 */
		unsigned long flags = native_save_fl();
		WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
				      X86_EFLAGS_NT));

		/* We think we came from user mode. Make sure pt_regs agrees. */
		WARN_ON_ONCE(!user_mode(regs));

		/*
		 * All entries from user mode (except #DF) should be on the
		 * normal thread stack and should have user pt_regs in the
		 * correct location.
		 */
		WARN_ON_ONCE(!on_thread_stack());
		WARN_ON_ONCE(regs != task_pt_regs(current));
	}
}
#define arch_check_user_regs arch_check_user_regs

#define ARCH_SYSCALL_EXIT_WORK		(_TIF_SINGLESTEP)

static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
	if (ti_work & _TIF_USER_RETURN_NOTIFY)
		fire_user_return_notifiers();

	if (unlikely(ti_work & _TIF_IO_BITMAP))
		tss_update_io_bitmap();

	fpregs_assert_state_consistent();
	if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
#endif
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

static __always_inline void arch_exit_to_user_mode(void)
{
	mds_user_clear_cpu_buffers();
}
#define arch_exit_to_user_mode arch_exit_to_user_mode

#endif
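
These hooks are consumed by the generic exit-to-user path. A condensed sketch of how the generic code is expected to call them (simplified from the generic entry code; lockdep/tracing details are omitted, and exit_to_user_mode_loop() is the generic TIF-work loop):

/*
 * Condensed sketch, not the literal generic code. The generic loop handles
 * signals, reschedule and notify-resume; the arch hook then performs the
 * x86 specific last-minute work defined in entry-common.h above.
 */
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	/* User-return notifiers, I/O bitmap, FPU state, TS_COMPAT clearing */
	arch_exit_to_user_mode_prepare(regs, ti_work);
}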
@@ -6,20 +6,11 @@
#include <asm/trapnr.h>

#ifndef __ASSEMBLY__
+#include <linux/entry-common.h>
#include <linux/hardirq.h>

#include <asm/irq_stack.h>

-void idtentry_enter_user(struct pt_regs *regs);
-void idtentry_exit_user(struct pt_regs *regs);
-
-typedef struct idtentry_state {
-	bool exit_rcu;
-} idtentry_state_t;
-
-idtentry_state_t idtentry_enter(struct pt_regs *regs);
-void idtentry_exit(struct pt_regs *regs, idtentry_state_t state);
-
bool idtentry_enter_nmi(struct pt_regs *regs);
void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state);
@@ -52,8 +43,8 @@ void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state);
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
- * idtentry_enter() contains common code which has to be invoked before
- * arbitrary code in the body. idtentry_exit() contains common code
+ * irqentry_enter() contains common code which has to be invoked before
+ * arbitrary code in the body. irqentry_exit() contains common code
 * which has to run before returning to the low level assembly code.
 */
#define DEFINE_IDTENTRY(func)						\
@@ -61,12 +52,12 @@ static __always_inline void __##func(struct pt_regs *regs);	\
									\
__visible noinstr void func(struct pt_regs *regs)			\
{									\
-	idtentry_state_t state = idtentry_enter(regs);			\
+	irqentry_state_t state = irqentry_enter(regs);			\
									\
	instrumentation_begin();					\
	__##func (regs);						\
	instrumentation_end();						\
-	idtentry_exit(regs, state);					\
+	irqentry_exit(regs, state);					\
}									\
									\
static __always_inline void __##func(struct pt_regs *regs)
@@ -108,12 +99,12 @@ static __always_inline void __##func(struct pt_regs *regs,	\
__visible noinstr void func(struct pt_regs *regs,			\
			    unsigned long error_code)			\
{									\
-	idtentry_state_t state = idtentry_enter(regs);			\
+	irqentry_state_t state = irqentry_enter(regs);			\
									\
	instrumentation_begin();					\
	__##func (regs, error_code);					\
	instrumentation_end();						\
-	idtentry_exit(regs, state);					\
+	irqentry_exit(regs, state);					\
}									\
									\
static __always_inline void __##func(struct pt_regs *regs,		\
@@ -168,7 +159,7 @@ __visible noinstr void func(struct pt_regs *regs)
 * body with a pair of curly brackets.
 *
 * Contrary to DEFINE_IDTENTRY_ERRORCODE() this does not invoke the
- * idtentry_enter/exit() helpers before and after the body invocation. This
+ * irqentry_enter/exit() helpers before and after the body invocation. This
 * needs to be done in the body itself if applicable. Use if extra work
 * is required before the enter/exit() helpers are invoked.
 */
@@ -194,11 +185,9 @@ __visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
 * to the function as error_code argument which needs to be truncated
 * to an u8 because the push is sign extending.
 *
- * On 64-bit idtentry_enter/exit() are invoked in the ASM entry code before
- * and after switching to the interrupt stack. On 32-bit this happens in C.
- *
 * irq_enter/exit_rcu() are invoked before the function body and the
- * KVM L1D flush request is set.
+ * KVM L1D flush request is set. Stack switching to the interrupt stack
+ * has to be done in the function body if necessary.
 */
#define DEFINE_IDTENTRY_IRQ(func)					\
static __always_inline void __##func(struct pt_regs *regs, u8 vector);	\
@@ -206,7 +195,7 @@ static __always_inline void __##func(struct pt_regs *regs, u8 vector);	\
__visible noinstr void func(struct pt_regs *regs,			\
			    unsigned long error_code)			\
{									\
-	idtentry_state_t state = idtentry_enter(regs);			\
+	irqentry_state_t state = irqentry_enter(regs);			\
									\
	instrumentation_begin();					\
	irq_enter_rcu();						\
@@ -214,7 +203,7 @@ __visible noinstr void func(struct pt_regs *regs,		\
	__##func (regs, (u8)error_code);				\
	irq_exit_rcu();							\
	instrumentation_end();						\
-	idtentry_exit(regs, state);					\
+	irqentry_exit(regs, state);					\
}									\
									\
static __always_inline void __##func(struct pt_regs *regs, u8 vector)
@@ -238,7 +227,7 @@ static __always_inline void __##func(struct pt_regs *regs, u8 vector)
 * DEFINE_IDTENTRY_SYSVEC - Emit code for system vector IDT entry points
 * @func:	Function name of the entry point
 *
- * idtentry_enter/exit() and irq_enter/exit_rcu() are invoked before the
+ * irqentry_enter/exit() and irq_enter/exit_rcu() are invoked before the
 * function body. KVM L1D flush request is set.
 *
 * Runs the function on the interrupt stack if the entry hit kernel mode
@@ -248,7 +237,7 @@ static void __##func(struct pt_regs *regs);			\
									\
__visible noinstr void func(struct pt_regs *regs)			\
{									\
-	idtentry_state_t state = idtentry_enter(regs);			\
+	irqentry_state_t state = irqentry_enter(regs);			\
									\
	instrumentation_begin();					\
	irq_enter_rcu();						\
@@ -256,7 +245,7 @@ __visible noinstr void func(struct pt_regs *regs)		\
	run_on_irqstack_cond(__##func, regs, regs);			\
	irq_exit_rcu();							\
	instrumentation_end();						\
-	idtentry_exit(regs, state);					\
+	irqentry_exit(regs, state);					\
}									\
									\
static noinline void __##func(struct pt_regs *regs)
@@ -277,7 +266,7 @@ static __always_inline void __##func(struct pt_regs *regs);	\
									\
__visible noinstr void func(struct pt_regs *regs)			\
{									\
-	idtentry_state_t state = idtentry_enter(regs);			\
+	irqentry_state_t state = irqentry_enter(regs);			\
									\
	instrumentation_begin();					\
	__irq_enter_raw();						\
@@ -285,7 +274,7 @@ __visible noinstr void func(struct pt_regs *regs)		\
	__##func (regs);						\
	__irq_exit_raw();						\
	instrumentation_end();						\
-	idtentry_exit(regs, state);					\
+	irqentry_exit(regs, state);					\
}									\
									\
static __always_inline void __##func(struct pt_regs *regs)
......
@@ -209,6 +209,11 @@ static inline void user_stack_pointer_set(struct pt_regs *regs,
	regs->sp = val;
}

+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
+{
+	return !(regs->flags & X86_EFLAGS_IF);
+}
+
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
......
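
regs_irqs_disabled() exists so the generic code can key off the interrupted context's EFLAGS.IF rather than the current CPU state. A rough sketch of the generic irqentry_enter()/irqentry_exit() pair that replaces idtentry_enter()/idtentry_exit() throughout the hunks above and below (heavily condensed; the RCU, lockdep and tracing handling of the real generic code is elided):

/* Rough sketch only, not the literal generic implementation. */
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = { .exit_rcu = false };

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/* Entered from a kernel context where RCU is not watching (idle):
	 * tell RCU about the interrupt and remember to undo it on exit. */
	if (is_idle_task(current)) {
		rcu_irq_enter();
		ret.exit_rcu = true;
	}
	return ret;
}

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * The interrupted kernel context had IRQs enabled: the real
		 * code handles IRQ-on tracing and kernel preemption here,
		 * and undoes the RCU entry if state.exit_rcu is set.
		 */
	} else if (state.exit_rcu) {
		rcu_irq_exit();
	}
}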
@@ -35,7 +35,6 @@ typedef sigset_t compat_sigset_t;
#endif /* __ASSEMBLY__ */
#include <uapi/asm/signal.h>
#ifndef __ASSEMBLY__
-extern void do_signal(struct pt_regs *regs);

#define __ARCH_HAS_SA_RESTORER
......
@@ -133,11 +133,6 @@ struct thread_info {
#define _TIF_X32		(1 << TIF_X32)
#define _TIF_FSCHECK		(1 << TIF_FSCHECK)

-/* Work to do before invoking the actual syscall. */
-#define _TIF_WORK_SYSCALL_ENTRY	\
-	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT |	\
-	 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
-
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW_BASE					\
	(_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP |		\
......
@@ -1047,7 +1047,7 @@ static __always_inline int patch_cmp(const void *key, const void *elt)
	return 0;
}

-int noinstr poke_int3_handler(struct pt_regs *regs)
+noinstr int poke_int3_handler(struct pt_regs *regs)
{
	struct bp_patching_desc *desc;
	struct text_poke_loc *tp;
......
@@ -1215,7 +1215,7 @@ static void kill_me_maybe(struct callback_head *cb)
 * backing the user stack, tracing that reads the user stack will cause
 * potentially infinite recursion.
 */
-void noinstr do_machine_check(struct pt_regs *regs)
+noinstr void do_machine_check(struct pt_regs *regs)
{
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
@@ -1930,11 +1930,11 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
static __always_inline void exc_machine_check_user(struct pt_regs *regs)
{
-	idtentry_enter_user(regs);
+	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();
	machine_check_vector(regs);
	instrumentation_end();
-	idtentry_exit_user(regs);
+	irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
......
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 reason = kvm_read_and_reset_apf_flags();
-	idtentry_state_t state;
+	irqentry_state_t state;

	switch (reason) {
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
@@ -243,7 +243,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
		return false;
	}

-	state = idtentry_enter(regs);
+	state = irqentry_enter(regs);
	instrumentation_begin();

	/*
@@ -264,7 +264,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
	}

	instrumentation_end();
-	idtentry_exit(regs, state);
+	irqentry_exit(regs, state);

	return true;
}
......
@@ -25,6 +25,7 @@
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
#include <linux/syscalls.h>

#include <asm/processor.h>
@@ -803,7 +804,7 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
-void do_signal(struct pt_regs *regs)
+void arch_do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;
......
@@ -245,7 +245,7 @@ static noinstr bool handle_bug(struct pt_regs *regs)
DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
-	idtentry_state_t state;
+	irqentry_state_t state;

	/*
	 * We use UD2 as a short encoding for 'CALL __WARN', as such
@@ -255,11 +255,11 @@ DEFINE_IDTENTRY_RAW(exc_invalid_op)
	if (!user_mode(regs) && handle_bug(regs))
		return;

-	state = idtentry_enter(regs);
+	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
-	idtentry_exit(regs, state);
+	irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
@@ -638,18 +638,18 @@ DEFINE_IDTENTRY_RAW(exc_int3)
		return;

	/*
-	 * idtentry_enter_user() uses static_branch_{,un}likely() and therefore
-	 * can trigger INT3, hence poke_int3_handler() must be done
-	 * before. If the entry came from kernel mode, then use nmi_enter()
-	 * because the INT3 could have been hit in any context including
-	 * NMI.
+	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
+	 * and therefore can trigger INT3, hence poke_int3_handler() must
+	 * be done before. If the entry came from kernel mode, then use
+	 * nmi_enter() because the INT3 could have been hit in any context
+	 * including NMI.
	 */
	if (user_mode(regs)) {
-		idtentry_enter_user(regs);
+		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
-		idtentry_exit_user(regs);
+		irqentry_exit_to_user_mode(regs);
	} else {
		bool irq_state = idtentry_enter_nmi(regs);
		instrumentation_begin();
@@ -895,13 +895,13 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
	 */
	WARN_ON_ONCE(!user_mode(regs));

-	idtentry_enter_user(regs);
+	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();
	handle_debug(regs, dr6, true);
	instrumentation_end();
-	idtentry_exit_user(regs);
+	irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
......
@@ -42,6 +42,7 @@ config KVM
	select HAVE_KVM_MSI
	select HAVE_KVM_CPU_RELAX_INTERCEPT
	select HAVE_KVM_NO_POLL
+	select KVM_XFER_TO_GUEST_WORK
	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
	select KVM_VFIO
	select SRCU
......
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>
+#include <linux/entry-kvm.h>

#include <asm/apic.h>
#include <asm/asm.h>
@@ -5373,14 +5374,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
		}

		/*
-		 * Note, return 1 and not 0, vcpu_run() is responsible for
-		 * morphing the pending signal into the proper return code.
+		 * Note, return 1 and not 0, vcpu_run() will invoke
+		 * xfer_to_guest_mode() which will create a proper return
+		 * code.
		 */
-		if (signal_pending(current))
+		if (__xfer_to_guest_mode_work_pending())
			return 1;
-
-		if (need_resched())
-			schedule();
	}

	return 1;
......
@@ -56,6 +56,7 @@
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
+#include <linux/entry-kvm.h>

#include <trace/events/kvm.h>
@@ -1587,7 +1588,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{
	return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
-		need_resched() || signal_pending(current);
+		xfer_to_guest_mode_work_pending();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
@@ -8681,15 +8682,11 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
			break;
		}

-		if (signal_pending(current)) {
-			r = -EINTR;
-			vcpu->run->exit_reason = KVM_EXIT_INTR;
-			++vcpu->stat.signal_exits;
-			break;
-		}
-		if (need_resched()) {
+		if (__xfer_to_guest_mode_work_pending()) {
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-			cond_resched();
+			r = xfer_to_guest_mode_handle_work(vcpu);
+			if (r)
+				return r;
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
		}
	}
......
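
The vcpu_run() hunk above replaces the open-coded signal/reschedule handling with the generic xfer-to-guest-mode work handling that this series consolidates. A rough sketch of what that generic helper does (simplified; the arch-specific work hook and instrumentation of the real code are elided):

/*
 * Rough sketch of xfer_to_guest_mode_handle_work(), not the literal
 * implementation. It loops until all TIF work is done and returns non-zero
 * when the vcpu ioctl should return to user space instead of entering the
 * guest; a pending signal is reported as KVM_EXIT_INTR via the KVM callback.
 */
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	if (!(ti_work & XFER_TO_GUEST_MODE_WORK))
		return 0;

	do {
		if (ti_work & _TIF_SIGPENDING) {
			kvm_handle_signal_exit(vcpu);
			return -EINTR;
		}

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_NOTIFY_RESUME)
			tracehook_notify_resume(NULL);

		ti_work = READ_ONCE(current_thread_info()->flags);
	} while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());

	return 0;
}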
@@ -1377,7 +1377,7 @@ handle_page_fault(struct pt_regs *regs, unsigned long error_code,
DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
{
	unsigned long address = read_cr2();
-	idtentry_state_t state;
+	irqentry_state_t state;

	prefetchw(&current->mm->mmap_lock);
@@ -1412,11 +1412,11 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
	 * code reenabled RCU to avoid subsequent wreckage which helps
	 * debugability.
	 */
-	state = idtentry_enter(regs);
+	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_page_fault(regs, error_code, address);
	instrumentation_end();
-	idtentry_exit(regs, state);
+	irqentry_exit(regs, state);
}