Commit 9c3d370a authored by Cathy Avery, committed by Paolo Bonzini

KVM: SVM: Implement check_nested_events for NMI

Migrate nested guest NMI intercept processing
to the new check_nested_events path.
Signed-off-by: Cathy Avery <cavery@redhat.com>
Message-Id: <20200414201107.22952-2-cavery@redhat.com>
[Reorder clauses as NMIs have higher priority than IRQs; inject
 immediate vmexit as is now done for IRQ vmexits. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6e085cbf
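For orientation, below is a simplified sketch of what svm_check_nested_events() looks like after this patch, reconstructed from the hunks that follow. It is not the full function: the tail of the IRQ branch is not visible in the hunks and is filled in here as an assumption (calling nested_svm_intr(), which the first hunk shows exists in this file). The point is the ordering: a pending NMI that the L1 guest intercepts is checked before interrupts, matching its higher priority, and the nested vmexit is injected immediately via nested_svm_nmi().

	static int svm_check_nested_events(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);
		bool block_nested_events =
			kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required ||
			svm->nested.nested_run_pending;

		/* NMIs are checked first: they have higher priority than IRQs. */
		if (vcpu->arch.nmi_pending && nested_exit_on_nmi(svm)) {
			if (block_nested_events)
				return -EBUSY;
			nested_svm_nmi(svm);	/* synthesize SVM_EXIT_NMI vmexit right away */
			return 0;
		}

		/* IRQ branch, unchanged by this patch; body assumed here for illustration. */
		if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
			if (block_nested_events)
				return -EBUSY;
			nested_svm_intr(svm);
			return 0;
		}

		return 0;
	}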
@@ -799,6 +799,20 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 	return vmexit;
 }
 
+static bool nested_exit_on_nmi(struct vcpu_svm *svm)
+{
+	return (svm->nested.intercept & (1ULL << INTERCEPT_NMI));
+}
+
+static void nested_svm_nmi(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
+	svm->vmcb->control.exit_info_1 = 0;
+	svm->vmcb->control.exit_info_2 = 0;
+
+	nested_svm_vmexit(svm);
+}
+
 static void nested_svm_intr(struct vcpu_svm *svm)
 {
 	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
@@ -822,6 +836,13 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 		kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required ||
 		svm->nested.nested_run_pending;
 
+	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(svm)) {
+		if (block_nested_events)
+			return -EBUSY;
+		nested_svm_nmi(svm);
+		return 0;
+	}
+
 	if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
 		if (block_nested_events)
 			return -EBUSY;
......
@@ -3070,9 +3070,10 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
 	int ret;
+
 	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
 	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
-	ret = ret && gif_set(svm) && nested_svm_nmi(svm);
+	ret = ret && gif_set(svm);
 
 	return ret;
 }
@@ -3150,9 +3151,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 		return; /* STGI will cause a vm exit */
 	}
 
-	if (svm->nested.exit_required)
-		return; /* we're not going to run the guest yet */
-
 	/*
 	 * Something prevents NMI from been injected. Single step over possible
 	 * problem (IRET or exception injection or interrupt shadow)
......
@@ -373,21 +373,6 @@ void disable_nmi_singlestep(struct vcpu_svm *svm);
 #define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
 #define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
 
-/* This function returns true if it is save to enable the nmi window */
-static inline bool nested_svm_nmi(struct vcpu_svm *svm)
-{
-	if (!is_guest_mode(&svm->vcpu))
-		return true;
-
-	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
-		return true;
-
-	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
-	svm->nested.exit_required = true;
-
-	return false;
-}
-
 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
 {
 	return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
......