Commit 2c82878b authored by Paolo Bonzini, committed by Radim Krčmář

KVM: VMX: require virtual NMI support

Virtual NMIs are only missing in Prescott and Yonah chips. Both are obsolete
for virtualization usage (Yonah is even 32-bit only), so drop the software vNMI emulation.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 74f16909
@@ -615,10 +615,6 @@ struct vcpu_vmx {
 	int vpid;
 	bool emulation_required;
 
-	/* Support for vnmi-less CPUs */
-	int soft_vnmi_blocked;
-	ktime_t entry_time;
-	s64 vnmi_blocked_time;
 	u32 exit_reason;
 
 	/* Posted interrupt descriptor */
@@ -1290,11 +1286,6 @@ static inline bool cpu_has_vmx_invpcid(void)
 		SECONDARY_EXEC_ENABLE_INVPCID;
 }
 
-static inline bool cpu_has_virtual_nmis(void)
-{
-	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
-}
-
 static inline bool cpu_has_vmx_wbinvd_exit(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -3623,9 +3614,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 				&_vmexit_control) < 0)
 		return -EIO;
 
-	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
-	opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
-		PIN_BASED_VMX_PREEMPTION_TIMER;
+	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
+		PIN_BASED_VIRTUAL_NMIS;
+	opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
 				&_pin_based_exec_control) < 0)
 		return -EIO;
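
The functional core of the patch is the hunk above: PIN_BASED_VIRTUAL_NMIS moves from the optional (opt) set to the required (min) set, so a CPU that cannot enable virtual NMIs now makes VMX setup fail with -EIO instead of falling back to the software emulation removed below. As a rough sketch of how the min/opt distinction behaves (simplified from the kernel's adjust_vmx_controls() of this era, not a verbatim copy; it relies on the kernel's rdmsr() macro):

/*
 * Sketch only: bits in "min" are mandatory -- if the VMX capability MSR
 * does not allow one of them to be set, setup fails and the caller
 * propagates -EIO.  Bits in "opt" are enabled only when available.
 */
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure the minimum (required) set of control bits is supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}

With PIN_BASED_VIRTUAL_NMIS in min, setup_vmcs_config() fails on vNMI-less CPUs and kvm_intel simply does not load there, which is what lets every cpu_has_virtual_nmis() check and all of the soft_vnmi bookkeeping go away.
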
@@ -5298,8 +5289,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 	vmx->rmode.vm86_active = 0;
 
-	vmx->soft_vnmi_blocked = 0;
-
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
 	kvm_set_cr8(vcpu, 0);
 
@@ -5419,8 +5408,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
-	if (!cpu_has_virtual_nmis() ||
-	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
 		enable_irq_window(vcpu);
 		return;
 	}
@@ -5461,19 +5449,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (!is_guest_mode(vcpu)) {
-		if (!cpu_has_virtual_nmis()) {
-			/*
-			 * Tracking the NMI-blocked state in software is built upon
-			 * finding the next open IRQ window. This, in turn, depends on
-			 * well-behaving guests: They have to keep IRQs disabled at
-			 * least as long as the NMI handler runs. Otherwise we may
-			 * cause NMI nesting, maybe breaking the guest. But as this is
-			 * highly unlikely, we can live with the residual risk.
-			 */
-			vmx->soft_vnmi_blocked = 1;
-			vmx->vnmi_blocked_time = 0;
-		}
-
 		++vcpu->stat.nmi_injections;
 		vmx->nmi_known_unmasked = false;
 	}
@@ -5490,8 +5465,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
 {
-	if (!cpu_has_virtual_nmis())
-		return to_vmx(vcpu)->soft_vnmi_blocked;
 	if (to_vmx(vcpu)->nmi_known_unmasked)
 		return false;
 	return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
@@ -5501,12 +5474,6 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!cpu_has_virtual_nmis()) {
-		if (vmx->soft_vnmi_blocked != masked) {
-			vmx->soft_vnmi_blocked = masked;
-			vmx->vnmi_blocked_time = 0;
-		}
-	} else {
 		vmx->nmi_known_unmasked = !masked;
 		if (masked)
 			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
@@ -5514,7 +5481,6 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 		else
 			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
 					GUEST_INTR_STATE_NMI);
-	}
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -5522,9 +5488,6 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 	if (to_vmx(vcpu)->nested.nested_run_pending)
 		return 0;
 
-	if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
-		return 0;
-
 	return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
 		  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
 		   | GUEST_INTR_STATE_NMI));
@@ -6269,7 +6232,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	 * AAK134, BY25.
 	 */
 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
-			cpu_has_virtual_nmis() &&
 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
 
@@ -7820,7 +7782,6 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
 	 * "blocked by NMI" bit has to be set before next VM entry.
 	 */
 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
-			cpu_has_virtual_nmis() &&
 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
 				GUEST_INTR_STATE_NMI);
@@ -8492,26 +8453,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
-	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
-	    !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
-					get_vmcs12(vcpu))))) {
-		if (vmx_interrupt_allowed(vcpu)) {
-			vmx->soft_vnmi_blocked = 0;
-		} else if (vmx->vnmi_blocked_time > 1000000000LL &&
-			   vcpu->arch.nmi_pending) {
-			/*
-			 * This CPU don't support us in finding the end of an
-			 * NMI-blocked window if the guest runs with IRQs
-			 * disabled. So we pull the trigger after 1 s of
-			 * futile waiting, but inform the user about this.
-			 */
-			printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
-			       "state on VCPU %d after 1 s timeout\n",
-			       __func__, vcpu->vcpu_id);
-			vmx->soft_vnmi_blocked = 0;
-		}
-	}
-
 	if (exit_reason < kvm_vmx_max_exit_handlers
 	    && kvm_vmx_exit_handlers[exit_reason])
 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
@@ -8787,7 +8728,6 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 
 	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
-	if (cpu_has_virtual_nmis()) {
 		if (vmx->nmi_known_unmasked)
 			return;
 		/*
@@ -8815,9 +8755,6 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 			vmx->nmi_known_unmasked =
 				!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
 				  & GUEST_INTR_STATE_NMI);
-	} else if (unlikely(vmx->soft_vnmi_blocked))
-		vmx->vnmi_blocked_time +=
-			ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
 }
 
 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
@@ -8934,10 +8871,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long debugctlmsr, cr4;
 
-	/* Record the guest's net vcpu time for enforced NMI injections. */
-	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
-		vmx->entry_time = ktime_get();
-
 	/* Don't enter VMX if guest state is invalid, let the exit handler
 	 * start emulation until we arrive back to a valid state */
 	if (vmx->emulation_required)
......