Commit 1a5488ef authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Invoke NMI handler via indirect call instead of INTn

Rework NMI VM-Exit handling to invoke the kernel handler by function
call instead of INTn.  INTn microcode is relatively expensive, and
aligning the IRQ and NMI handling will make it easier to update KVM
should some newfangled method for invoking the handlers come along.
Suggested-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200915191505.10355-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 535f7ef2
@@ -6325,40 +6325,40 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 
 void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
 
+static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
+{
+        unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
+        gate_desc *desc = (gate_desc *)host_idt_base + vector;
+
+        kvm_before_interrupt(vcpu);
+        vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
+        kvm_after_interrupt(vcpu);
+}
+
 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
 {
         u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
 
         /* if exit due to PF check for async PF */
-        if (is_page_fault(intr_info)) {
+        if (is_page_fault(intr_info))
                 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
         /* Handle machine checks before interrupts are enabled */
-        } else if (is_machine_check(intr_info)) {
+        else if (is_machine_check(intr_info))
                 kvm_machine_check();
         /* We need to handle NMIs before interrupts are enabled */
-        } else if (is_nmi(intr_info)) {
-                kvm_before_interrupt(&vmx->vcpu);
-                asm("int $2");
-                kvm_after_interrupt(&vmx->vcpu);
-        }
+        else if (is_nmi(intr_info))
+                handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
 }
 
 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 {
-        unsigned int vector;
-        gate_desc *desc;
         u32 intr_info = vmx_get_intr_info(vcpu);
 
         if (WARN_ONCE(!is_external_intr(intr_info),
             "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
                 return;
 
-        vector = intr_info & INTR_INFO_VECTOR_MASK;
-        desc = (gate_desc *)host_idt_base + vector;
-
-        kvm_before_interrupt(vcpu);
-        vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
-        kvm_after_interrupt(vcpu);
+        handle_interrupt_nmi_irqoff(vcpu, intr_info);
 }
 
 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
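
For context, a minimal stand-alone sketch of what the gate lookup in handle_interrupt_nmi_irqoff() boils down to: index the host IDT by vector, then reassemble the handler's 64-bit entry point from the gate descriptor's split offset fields, which is what gate_offset() does before the address is handed to the vmx_do_interrupt_nmi_irqoff() assembly stub for an indirect call. The struct below is a simplified approximation of the x86-64 interrupt gate layout for illustration only, not the kernel's gate_desc definition; the field names, the sample address, and main() are assumptions of this sketch.

/*
 * Illustrative user-space sketch (not kernel code): recovering a handler
 * address from a 64-bit IDT gate descriptor, as gate_offset() does for
 * "(gate_desc *)host_idt_base + vector" in the diff above.
 */
#include <stdint.h>
#include <stdio.h>

struct idt_gate64 {
        uint16_t offset_low;    /* handler address bits  0..15 */
        uint16_t segment;       /* code segment selector       */
        uint16_t bits;          /* IST, type, DPL, present     */
        uint16_t offset_middle; /* handler address bits 16..31 */
        uint32_t offset_high;   /* handler address bits 32..63 */
        uint32_t reserved;
};

/* Reassemble the 64-bit entry point from the three offset fields. */
static uint64_t gate_offset(const struct idt_gate64 *g)
{
        return (uint64_t)g->offset_low |
               ((uint64_t)g->offset_middle << 16) |
               ((uint64_t)g->offset_high << 32);
}

int main(void)
{
        /* Pretend the NMI handler lives at this (made-up) address. */
        const uint64_t entry = 0xffffffff81a01234ull;

        struct idt_gate64 nmi_gate = {
                .offset_low    = entry & 0xffff,
                .offset_middle = (entry >> 16) & 0xffff,
                .offset_high   = entry >> 32,
        };

        /*
         * KVM indexes the host IDT by vector (2 for NMI) and then calls
         * vmx_do_interrupt_nmi_irqoff(gate_offset(desc)), which builds a
         * synthetic interrupt frame and calls this address, rather than
         * executing "int $2".
         */
        printf("handler entry = %#llx\n",
               (unsigned long long)gate_offset(&nmi_gate));
        return 0;
}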