Commit 8fdb2351 authored by Jan Kiszka's avatar Jan Kiszka Committed by Avi Kivity

KVM: x86: Fix and refactor NMI watchdog emulation

This patch refactors the NMI watchdog delivery path, consolidating
tests and providing a proper API for delivering watchdog events.

An included micro-optimization is to check only for apic_hw_enabled in
kvm_apic_local_deliver (the test for LVT mask is covering the
soft-disabled case already).
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Acked-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 291fd39b
...@@ -612,15 +612,18 @@ static void __inject_pit_timer_intr(struct kvm *kvm) ...@@ -612,15 +612,18 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
mutex_unlock(&kvm->lock); mutex_unlock(&kvm->lock);
/* /*
* Provides NMI watchdog support in IOAPIC mode. * Provides NMI watchdog support via Virtual Wire mode.
* The route is: PIT -> PIC -> LVT0 in NMI mode, * The route is: PIT -> PIC -> LVT0 in NMI mode.
* timer IRQs will continue to flow through the IOAPIC. *
* Note: Our Virtual Wire implementation is simplified, only
* propagating PIT interrupts to all VCPUs when they have set
* LVT0 to NMI delivery. Other PIC interrupts are just sent to
* VCPU0, and only if its LVT0 is in EXTINT mode.
*/ */
for (i = 0; i < KVM_MAX_VCPUS; ++i) { for (i = 0; i < KVM_MAX_VCPUS; ++i) {
vcpu = kvm->vcpus[i]; vcpu = kvm->vcpus[i];
if (!vcpu) if (vcpu)
continue; kvm_apic_nmi_wd_deliver(vcpu);
kvm_apic_local_deliver(vcpu, APIC_LVT0);
} }
} }
......
...@@ -87,7 +87,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s); ...@@ -87,7 +87,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s);
void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec); void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
int kvm_apic_local_deliver(struct kvm_vcpu *vcpu, int lvt_type); void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu); void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu); void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
void __kvm_migrate_timers(struct kvm_vcpu *vcpu); void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
......
...@@ -973,14 +973,12 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu) ...@@ -973,14 +973,12 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
return 0; return 0;
} }
int kvm_apic_local_deliver(struct kvm_vcpu *vcpu, int lvt_type) static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; u32 reg = apic_get_reg(apic, lvt_type);
int vector, mode, trig_mode; int vector, mode, trig_mode;
u32 reg;
if (apic && apic_enabled(apic)) { if (apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
reg = apic_get_reg(apic, lvt_type);
vector = reg & APIC_VECTOR_MASK; vector = reg & APIC_VECTOR_MASK;
mode = reg & APIC_MODE_MASK; mode = reg & APIC_MODE_MASK;
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
...@@ -989,9 +987,12 @@ int kvm_apic_local_deliver(struct kvm_vcpu *vcpu, int lvt_type) ...@@ -989,9 +987,12 @@ int kvm_apic_local_deliver(struct kvm_vcpu *vcpu, int lvt_type)
return 0; return 0;
} }
static inline int __inject_apic_timer_irq(struct kvm_lapic *apic) void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{ {
return kvm_apic_local_deliver(apic->vcpu, APIC_LVTT); struct kvm_lapic *apic = vcpu->arch.apic;
if (apic)
kvm_apic_local_deliver(apic, APIC_LVT0);
} }
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
...@@ -1086,9 +1087,8 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) ...@@ -1086,9 +1087,8 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
if (apic && apic_lvt_enabled(apic, APIC_LVTT) && if (apic && atomic_read(&apic->timer.pending) > 0) {
atomic_read(&apic->timer.pending) > 0) { if (kvm_apic_local_deliver(apic, APIC_LVTT))
if (__inject_apic_timer_irq(apic))
atomic_dec(&apic->timer.pending); atomic_dec(&apic->timer.pending);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment