Commit 388d4359 authored by Andre Przywara, committed by Paolo Bonzini

KVM: arm/arm64: Properly protect VGIC locks from IRQs

As Jan reported [1], lockdep complains about the VGIC not being bulletproof.
This seems to be due to two issues:
- When commit 006df0f3 ("KVM: arm/arm64: Support calling
  vgic_update_irq_pending from irq context") promoted irq_lock and
  ap_list_lock to _irqsave, we forgot two instances of irq_lock.
  lockdep seems to pick those up.
- If a lock is taken with _irqsave, any other lock taken while holding it
  must be IRQ-safe as well, so the lpi_list_lock needs to be promoted too.

This fixes both issues by simply making the remaining instances of those
locks _irqsave (a minimal sketch of the pattern follows the commit metadata below).
One irq_lock is addressed in a separate patch, to simplify backporting.

[1] http://lists.infradead.org/pipermail/linux-arm-kernel/2018-May/575718.html

Cc: stable@vger.kernel.org
Fixes: 006df0f3 ("KVM: arm/arm64: Support calling vgic_update_irq_pending from irq context")
Reported-by: Jan Glauber <jan.glauber@caviumnetworks.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4c27625b
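
To make the diff easier to follow, here is a minimal sketch of the promotion the commit message describes. It is not taken from the VGIC code: the lock and function names are invented for illustration, and only the spin_lock()/spin_lock_irqsave() kernel API itself is real.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for e.g. lpi_list_lock */

/* Before: safe only if the lock is never taken from hard-IRQ context. */
static void example_update(void)
{
	spin_lock(&example_lock);
	/* ... update some state ... */
	spin_unlock(&example_lock);
}

/*
 * After: disable IRQs while the lock is held, so an interrupt handler
 * (e.g. the timer ISR calling back into the VGIC) cannot spin forever
 * on a lock already held by the interrupted context on the same CPU.
 */
static void example_update_irqsafe(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... update some state ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

Using the _irqsave/_irqrestore pair (rather than spin_lock_irq()) preserves the caller's interrupt state, which matters because these paths can be reached both with and without IRQs already disabled.
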
@@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 	struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
 	struct vgic_irq *irq;
 	struct kvm_vcpu *vcpu = NULL;
+	unsigned long flags;

 	if (iter->dist_id == 0) {
 		print_dist_state(s, &kvm->arch.vgic);
@@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 		irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
 	}

-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	print_irq_state(s, irq, vcpu);
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);

 	return 0;
 }
...
@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+	unsigned long flags;
 	int ret;

 	/* In this case there is no put, since we keep the reference. */
@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	irq->intid = intid;
 	irq->target_vcpu = vcpu;

-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);

 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to
@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	dist->lpi_list_count++;

 out_unlock:
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

 	/*
 	 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_irq *irq;
+	unsigned long flags;
 	u32 *intids;
 	int irq_count, i = 0;

@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	if (!intids)
 		return -ENOMEM;

-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (i == irq_count)
 			break;
@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 			continue;
 		intids[i++] = irq->intid;
 	}
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

 	*intid_ptr = intids;
 	return i;
...
@@ -43,9 +43,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * kvm->lock (mutex)
  *   its->cmd_lock (mutex)
  *     its->its_lock (mutex)
- *       vgic_cpu->ap_list_lock
- *         kvm->lpi_list_lock
- *           vgic_irq->irq_lock
+ *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
+ *         kvm->lpi_list_lock		must be taken with IRQs disabled
+ *           vgic_irq->irq_lock		must be taken with IRQs disabled
+ *
+ * As the ap_list_lock might be taken from the timer interrupt handler,
+ * we have to disable IRQs before taking this lock and everything lower
+ * than it.
  *
  * If you need to take multiple locks, always take the upper lock first,
  * then the lower ones, e.g. first take the its_lock, then the irq_lock.
@@ -72,8 +76,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct vgic_irq *irq = NULL;
+	unsigned long flags;

-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);

 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (irq->intid != intid)
@@ -89,7 +94,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 	irq = NULL;

 out_unlock:
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

 	return irq;
 }
@@ -134,19 +139,20 @@ static void vgic_irq_release(struct kref *ref)
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
+	unsigned long flags;

 	if (irq->intid < VGIC_MIN_LPI)
 		return;

-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	if (!kref_put(&irq->refcount, vgic_irq_release)) {
-		spin_unlock(&dist->lpi_list_lock);
+		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 		return;
 	};

 	list_del(&irq->lpi_list);
 	dist->lpi_list_count--;
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

 	kfree(irq);
 }
...
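
The lock-ordering comment updated in vgic.c above also implies a second rule that the rest of the VGIC code relies on: once the outermost lock of the hierarchy has been taken with IRQs disabled, the locks nested below it can be taken with a plain spin_lock(), because interrupts are already off. A minimal sketch of that nesting, with invented names (outer_lock/inner_lock standing in for something like ap_list_lock/irq_lock), not code from the patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* think: vgic_cpu->ap_list_lock */
static DEFINE_SPINLOCK(inner_lock);	/* think: vgic_irq->irq_lock */

static void nested_locking_sketch(void)
{
	unsigned long flags;

	/* Upper lock first, disabling IRQs for everything below it. */
	spin_lock_irqsave(&outer_lock, flags);

	/* Lower lock second; plain spin_lock() suffices, IRQs are already off. */
	spin_lock(&inner_lock);
	/* ... work that needs both locks ... */
	spin_unlock(&inner_lock);

	spin_unlock_irqrestore(&outer_lock, flags);
}

This is presumably why the comment says the lower locks "must be taken with IRQs disabled" rather than "must be taken with _irqsave": what matters is that IRQs are off while they are held, however that was achieved.
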