Commit 135961e0 authored by Thomas Gleixner, committed by Paolo Bonzini

x86/kvm/svm: Move guest enter/exit into .noinstr.text

Move the functions which are inside the RCU off region into the
non-instrumentable text section.
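
For reference: noinstr places a function into the .noinstr.text section and excludes it from compiler and tracing instrumentation, so the few calls that may still legitimately instrument (tracepoints, lockdep) have to sit between instrumentation_begin() and instrumentation_end(). A minimal sketch of that pattern, with a made-up function name and v5.8-era header locations assumed; this is illustrative, not code from this commit:

    #include <linux/compiler.h>	/* noinstr, instrumentation_begin/end() */
    #include <linux/irqflags.h>	/* trace_hardirqs_on_prepare() */

    /* Illustrative only: work done in the RCU-off window around guest entry. */
    static noinstr void example_guest_enter(void)
    {
    	/* Instrumentable calls must be bracketed explicitly. */
    	instrumentation_begin();
    	trace_hardirqs_on_prepare();
    	instrumentation_end();

    	/* ...non-instrumentable region: no tracepoints or lockdep calls... */
    }

Objtool's noinstr validation can then report calls from .noinstr.text into instrumentable code that are not bracketed this way.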
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>

Message-Id: <20200708195322.144607767@linutronix.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3ebccdf3
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3343,6 +3343,60 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 
 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 
+static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+					struct vcpu_svm *svm)
+{
+	/*
+	 * VMENTER enables interrupts (host state), but the kernel state is
+	 * interrupts disabled when this is invoked. Also tell RCU about
+	 * it. This is the same logic as for exit_to_user_mode().
+	 *
+	 * This ensures that e.g. latency analysis on the host observes
+	 * guest mode as interrupt enabled.
+	 *
+	 * guest_enter_irqoff() informs context tracking about the
+	 * transition to guest mode and if enabled adjusts RCU state
+	 * accordingly.
+	 */
+	instrumentation_begin();
+	trace_hardirqs_on_prepare();
+	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+	instrumentation_end();
+
+	guest_enter_irqoff();
+	lockdep_hardirqs_on(CALLER_ADDR0);
+
+	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
+
+#ifdef CONFIG_X86_64
+	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+#else
+	loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+	loadsegment(gs, svm->host.gs);
+#endif
+#endif
+
+	/*
+	 * VMEXIT disables interrupts (host state), but tracing and lockdep
+	 * have them in state 'on' as recorded before entering guest mode.
+	 * Same as enter_from_user_mode().
+	 *
+	 * guest_exit_irqoff() restores host context and reinstates RCU if
+	 * enabled and required.
+	 *
+	 * This needs to be done before the below as native_read_msr()
+	 * contains a tracepoint and x86_spec_ctrl_restore_host() calls
+	 * into world and some more.
+	 */
+	lockdep_hardirqs_off(CALLER_ADDR0);
+	guest_exit_irqoff();
+
+	instrumentation_begin();
+	trace_hardirqs_off_finish();
+	instrumentation_end();
+}
+
 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	fastpath_t exit_fastpath;
@@ -3398,49 +3452,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
-	/*
-	 * VMENTER enables interrupts (host state), but the kernel state is
-	 * interrupts disabled when this is invoked. Also tell RCU about
-	 * it. This is the same logic as for exit_to_user_mode().
-	 *
-	 * This ensures that e.g. latency analysis on the host observes
-	 * guest mode as interrupt enabled.
-	 *
-	 * guest_enter_irqoff() informs context tracking about the
-	 * transition to guest mode and if enabled adjusts RCU state
-	 * accordingly.
-	 */
-	trace_hardirqs_on_prepare();
-	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-	guest_enter_irqoff();
-	lockdep_hardirqs_on(CALLER_ADDR0);
-
-	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
-
-#ifdef CONFIG_X86_64
-	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
-	loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
-	loadsegment(gs, svm->host.gs);
-#endif
-#endif
-
-	/*
-	 * VMEXIT disables interrupts (host state), but tracing and lockdep
-	 * have them in state 'on' as recorded before entering guest mode.
-	 * Same as enter_from_user_mode().
-	 *
-	 * guest_exit_irqoff() restores host context and reinstates RCU if
-	 * enabled and required.
-	 *
-	 * This needs to be done before the below as native_read_msr()
-	 * contains a tracepoint and x86_spec_ctrl_restore_host() calls
-	 * into world and some more.
-	 */
-	lockdep_hardirqs_off(CALLER_ADDR0);
-	guest_exit_irqoff();
-	trace_hardirqs_off_finish();
+	svm_vcpu_enter_exit(vcpu, svm);
 
 	/*
 	 * We do not use IBRS in the kernel. If this vCPU has used the
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -27,7 +27,7 @@
 #define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
 #endif
 
-	.text
+	.section .noinstr.text, "ax"
 
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode