Commit ae705930 authored by Christoffer Dall

arm/arm64: KVM: Keep elrsr/aisr in sync with software model

There is an interesting bug in the vgic code, which manifests itself
when the KVM run loop has a signal pending or needs a vmid generation
rollover after having disabled interrupts but before actually switching
to the guest.

In this case, we flush the vgic as usual, but we sync back the vgic
state and exit to userspace before entering the guest.  The consequence
is that we will be syncing the list registers back to the software model
using the GICH_ELRSR and GICH_EISR from the last execution of the guest,
potentially overwriting a list register containing an interrupt.

This showed up during migration testing, where we would capture a state
in which the VM had masked the arch timer but no timer interrupt was
present, resulting in a hung test.

Cc: Marc Zyngier <marc.zyngier@arm.com>
Reported-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent b52104e5
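
To make the failing ordering described above concrete, here is a minimal
userspace sketch, not kernel code: flush_vgic(), sync_vgic() and run_guest()
are hypothetical stand-ins for kvm_vgic_flush_hwstate(), kvm_vgic_sync_hwstate()
and guest entry, and the signal-pending/vmid-rollover condition is reduced to
a flag.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified model of the KVM vcpu run loop. */
static bool signal_pending_or_vmid_rollover = true;

static void flush_vgic(void) { puts("flush: software model -> list registers"); }
static void sync_vgic(void)  { puts("sync: list registers -> software model"); }
static void run_guest(void)  { puts("enter guest"); }

int main(void)
{
	flush_vgic();
	/* Interrupts are disabled at this point in the real run loop. */
	if (signal_pending_or_vmid_rollover) {
		/*
		 * We bail out to userspace without having entered the guest,
		 * but we still sync.  Before this patch, sync consumed
		 * GICH_ELRSR/GICH_EISR values left over from the *previous*
		 * guest run, so a list register that flush_vgic() had just
		 * populated could be thrown away.
		 */
		sync_vgic();
		return 0;
	}
	run_guest();
	sync_vgic();
	return 0;
}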
@@ -114,6 +114,7 @@ struct vgic_ops {
 	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
 	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
 	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
+	void	(*clear_eisr)(struct kvm_vcpu *vcpu);
 	u32	(*get_interrupt_status)(const struct kvm_vcpu *vcpu);
 	void	(*enable_underflow)(struct kvm_vcpu *vcpu);
 	void	(*disable_underflow)(struct kvm_vcpu *vcpu);
...
@@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
 {
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
+	else
+		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
 }

 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
 	return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }

+static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
+}
+
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
 	u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
@@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = {
 	.sync_lr_elrsr		= vgic_v2_sync_lr_elrsr,
 	.get_elrsr		= vgic_v2_get_elrsr,
 	.get_eisr		= vgic_v2_get_eisr,
+	.clear_eisr		= vgic_v2_clear_eisr,
 	.get_interrupt_status	= vgic_v2_get_interrupt_status,
 	.enable_underflow	= vgic_v2_enable_underflow,
 	.disable_underflow	= vgic_v2_disable_underflow,
...
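
As a reader aid, a small standalone sketch (simplified types, not the kernel's
structures) of the invariant these hunks establish: after every sync_lr_elrsr()
call, the software elrsr bit for a list register is set exactly when that LR's
software copy is empty, so even a sync that happens before any guest entry sees
an accurate picture of which LRs are recyclable.

#include <assert.h>
#include <stdint.h>

#define LR_STATE_MASK 0x3u	/* pending/active bits, as in the vgic code */

/* Simplified stand-ins for struct vgic_lr and the per-vcpu elrsr copy. */
struct lr { uint32_t state; };

static void sync_lr_elrsr(uint64_t *elrsr, int lr, struct lr desc)
{
	if (!(desc.state & LR_STATE_MASK))
		*elrsr |= 1ULL << lr;		/* LR empty: mark recyclable */
	else
		*elrsr &= ~(1ULL << lr);	/* LR in use: the half this patch adds */
}

int main(void)
{
	uint64_t elrsr = ~0ULL;			/* all LRs start out empty */
	struct lr vlr = { .state = 0x1 };	/* a pending interrupt was queued */

	sync_lr_elrsr(&elrsr, 3, vlr);
	assert(!(elrsr & (1ULL << 3)));		/* bit cleared: LR 3 is occupied */

	vlr.state = 0;				/* interrupt retired */
	sync_lr_elrsr(&elrsr, 3, vlr);
	assert(elrsr & (1ULL << 3));		/* bit set again: LR 3 is free */
	return 0;
}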
@@ -104,6 +104,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
 {
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
+	else
+		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr);
 }

 static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -116,6 +118,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
 	return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
 }

+static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0;
+}
+
 static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
 	u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
@@ -192,6 +199,7 @@ static const struct vgic_ops vgic_v3_ops = {
 	.sync_lr_elrsr		= vgic_v3_sync_lr_elrsr,
 	.get_elrsr		= vgic_v3_get_elrsr,
 	.get_eisr		= vgic_v3_get_eisr,
+	.clear_eisr		= vgic_v3_clear_eisr,
 	.get_interrupt_status	= vgic_v3_get_interrupt_status,
 	.enable_underflow	= vgic_v3_enable_underflow,
 	.disable_underflow	= vgic_v3_disable_underflow,
...
@@ -883,6 +883,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
 	return vgic_ops->get_eisr(vcpu);
 }

+static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vgic_ops->clear_eisr(vcpu);
+}
+
 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
 {
 	return vgic_ops->get_interrupt_status(vcpu);
@@ -922,6 +927,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
 	vgic_set_lr(vcpu, lr_nr, vlr);
 	clear_bit(lr_nr, vgic_cpu->lr_used);
 	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }

 /*
@@ -978,6 +984,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
 		vlr.state |= LR_STATE_PENDING;
 		vgic_set_lr(vcpu, lr, vlr);
+		vgic_sync_lr_elrsr(vcpu, lr, vlr);
 		return true;
 	}
 }
@@ -999,6 +1006,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 		vlr.state |= LR_EOI_INT;

 	vgic_set_lr(vcpu, lr, vlr);
+	vgic_sync_lr_elrsr(vcpu, lr, vlr);

 	return true;
 }
@@ -1136,6 +1144,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	if (status & INT_STATUS_UNDERFLOW)
 		vgic_disable_underflow(vcpu);

+	/*
+	 * In the next iterations of the vcpu loop, if we sync the vgic state
+	 * after flushing it, but before entering the guest (this happens for
+	 * pending signals and vmid rollovers), then make sure we don't pick
+	 * up any old maintenance interrupts here.
+	 */
+	vgic_clear_eisr(vcpu);
+
 	return level_pending;
 }
...
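
The clear_eisr() half of the fix can be illustrated the same way. In this
hypothetical model, hw_eisr stands in for the saved GICH_EISR copy, which
hardware only rewrites when the guest actually runs; without the clearing
step, a flush/sync iteration that never enters the guest would replay the
previous run's EOI bits.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t hw_eisr;	/* stand-in for the saved GICH_EISR copy */

static void guest_run(uint64_t eisr_after_run) { hw_eisr = eisr_after_run; }

static void process_maintenance(void)
{
	for (int lr = 0; lr < 64; lr++)
		if (hw_eisr & (1ULL << lr))
			printf("EOI handling for LR %d\n", lr);
	hw_eisr = 0;	/* the patch: clear the snapshot so the next loop
			 * iteration cannot replay it without a guest entry */
}

int main(void)
{
	guest_run(1ULL << 2);	/* previous run: guest EOI'd LR 2 */
	process_maintenance();	/* handled once, then cleared */

	/* flush -> pending signal -> sync, without entering the guest */
	process_maintenance();	/* with the patch: nothing stale to replay */
	assert(hw_eisr == 0);
	return 0;
}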