Commit ffdf7f9e authored by Paolo Bonzini

KVM: nSVM: extract svm_set_gif

Extract the code that is needed to implement CLGI and STGI,
so that we can run it from VMRUN and vmexit (and in the future,
KVM_SET_NESTED_STATE).  Skip the request for KVM_REQ_EVENT unless needed,
subsuming the evaluate_pending_interrupts optimization that is found
in enter_svm_guest_mode.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 31031098
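
For illustration only, here is a small, self-contained C model of the event-evaluation rule described in the message above: when GIF is set, KVM_REQ_EVENT is requested only if an SMI, NMI, or injectable interrupt is actually pending, which is what subsumes the old evaluate_pending_interrupts check. This is not kernel code; every name below (mock_vcpu, mock_set_gif, the boolean fields) is invented for the sketch.

/*
 * Standalone sketch, not kernel code.  Models only the "skip
 * KVM_REQ_EVENT unless needed" decision from svm_set_gif().
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_vcpu {
	bool gif;               /* global interrupt flag */
	bool smi_pending;
	bool nmi_pending;
	bool injectable_intr;   /* stands in for kvm_cpu_has_injectable_intr() */
	bool req_event;         /* stands in for KVM_REQ_EVENT being requested */
};

/* Setting GIF asks for event re-evaluation only when something is pending. */
static void mock_set_gif(struct mock_vcpu *v, bool value)
{
	v->gif = value;
	if (value && (v->smi_pending || v->nmi_pending || v->injectable_intr))
		v->req_event = true;
}

int main(void)
{
	struct mock_vcpu a = { .injectable_intr = true };
	struct mock_vcpu b = { 0 };

	mock_set_gif(&a, true);  /* pending interrupt -> event requested   */
	mock_set_gif(&b, true);  /* nothing pending   -> no event requested */
	printf("a.req_event=%d b.req_event=%d\n", a.req_event, b.req_event);
	return 0;
}
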
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -83,6 +83,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
 
 	return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
 }
+EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
 
 /*
  * check if there is pending interrupt without
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -333,10 +333,6 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 			  struct vmcb *nested_vmcb)
 {
-	bool evaluate_pending_interrupts =
-		is_intercept(svm, INTERCEPT_VINTR) ||
-		is_intercept(svm, INTERCEPT_IRET);
-
 	svm->nested.vmcb = vmcb_gpa;
 	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
 		svm->vcpu.arch.hflags |= HF_HIF_MASK;
@@ -347,21 +343,7 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	nested_prepare_vmcb_save(svm, nested_vmcb);
 	nested_prepare_vmcb_control(svm);
 
-	/*
-	 * If L1 had a pending IRQ/NMI before executing VMRUN,
-	 * which wasn't delivered because it was disallowed (e.g.
-	 * interrupts disabled), L0 needs to evaluate if this pending
-	 * event should cause an exit from L2 to L1 or be delivered
-	 * directly to L2.
-	 *
-	 * Usually this would be handled by the processor noticing an
-	 * IRQ/NMI window request. However, VMRUN can unblock interrupts
-	 * by implicitly setting GIF, so force L0 to perform pending event
-	 * evaluation by requesting a KVM_REQ_EVENT.
-	 */
-	enable_gif(svm);
-	if (unlikely(evaluate_pending_interrupts))
-		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	svm_set_gif(svm, true);
 }
 
 int nested_svm_vmrun(struct vcpu_svm *svm)
@@ -505,7 +487,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
 	/* Give the current vmcb to the guest */
-	disable_gif(svm);
+	svm_set_gif(svm, false);
 
 	nested_vmcb->save.es = vmcb->save.es;
 	nested_vmcb->save.cs = vmcb->save.cs;
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1981,6 +1981,38 @@ static int vmrun_interception(struct vcpu_svm *svm)
 	return nested_svm_vmrun(svm);
 }
 
+void svm_set_gif(struct vcpu_svm *svm, bool value)
+{
+	if (value) {
+		/*
+		 * If VGIF is enabled, the STGI intercept is only added to
+		 * detect the opening of the SMI/NMI window; remove it now.
+		 * Likewise, clear the VINTR intercept, we will set it
+		 * again while processing KVM_REQ_EVENT if needed.
+		 */
+		if (vgif_enabled(svm))
+			clr_intercept(svm, INTERCEPT_STGI);
+		if (is_intercept(svm, SVM_EXIT_VINTR))
+			svm_clear_vintr(svm);
+
+		enable_gif(svm);
+		if (svm->vcpu.arch.smi_pending ||
+		    svm->vcpu.arch.nmi_pending ||
+		    kvm_cpu_has_injectable_intr(&svm->vcpu))
+			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	} else {
+		disable_gif(svm);
+
+		/*
+		 * After a CLGI no interrupts should come.  But if vGIF is
+		 * in use, we still rely on the VINTR intercept (rather than
+		 * STGI) to detect an open interrupt window.
+		 */
+		if (!vgif_enabled(svm))
+			svm_clear_vintr(svm);
+	}
+}
+
 static int stgi_interception(struct vcpu_svm *svm)
 {
 	int ret;
@@ -1988,18 +2020,8 @@ static int stgi_interception(struct vcpu_svm *svm)
 	if (nested_svm_check_permissions(svm))
 		return 1;
 
-	/*
-	 * If VGIF is enabled, the STGI intercept is only added to
-	 * detect the opening of the SMI/NMI window; remove it now.
-	 */
-	if (vgif_enabled(svm))
-		clr_intercept(svm, INTERCEPT_STGI);
-
 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
-	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
-
-	enable_gif(svm);
+	svm_set_gif(svm, true);
 
 	return ret;
 }
@@ -2011,12 +2033,7 @@ static int clgi_interception(struct vcpu_svm *svm)
 		return 1;
 
 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
-
-	disable_gif(svm);
-
-	/* After a CLGI no interrupts should come */
-	svm_clear_vintr(svm);
+	svm_set_gif(svm, false);
 
 	return ret;
 }
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -357,6 +357,7 @@ void disable_nmi_singlestep(struct vcpu_svm *svm);
 bool svm_smi_blocked(struct kvm_vcpu *vcpu);
 bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
+void svm_set_gif(struct vcpu_svm *svm, bool value);
 
 /* nested.c */