Commit d0823cb3 authored by Jia He, committed by Marc Zyngier

KVM: arm/arm64: vgic: Do not use spin_lock_irqsave/restore with irq disabled

kvm_vgic_sync_hwstate is only called with IRQs disabled.
There is thus no need to use spin_lock_irqsave/restore in
vgic_fold_lr_state and vgic_prune_ap_list.

This patch replaces them with the non-irq-safe versions.
Signed-off-by: Jia He <jia.he@hxt-semitech.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
[maz: commit message tidy-up]
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent dc961e53
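For illustration, the locking rule behind this change, as a minimal sketch that is not part of the patch (the demo_* names are invented for this example): spin_lock_irqsave() exists so a lock can be taken from contexts where interrupts may still be enabled; when the caller already guarantees interrupts are disabled, saving and restoring the flags is pure overhead, and a plain spin_lock() is sufficient.

#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/bug.h>

/* Hypothetical example, not taken from this patch. */
static DEFINE_SPINLOCK(demo_lock);
static int demo_counter;

/*
 * Caller guarantees IRQs are already disabled (as the vgic sync path does),
 * so saving/restoring the flags would be redundant.
 */
static void demo_update_irqs_off(void)
{
	/* Same idea as the DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()) check
	 * added by this patch: document and verify the calling context. */
	WARN_ON_ONCE(!irqs_disabled());

	spin_lock(&demo_lock);		/* no _irqsave needed here */
	demo_counter++;
	spin_unlock(&demo_lock);
}

/*
 * Callable from any context: must use the irq-safe variant, because
 * interrupts may still be enabled when we get here.
 */
static void demo_update_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_counter++;
	spin_unlock_irqrestore(&demo_lock, flags);
}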
@@ -62,7 +62,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
 	int lr;
-	unsigned long flags;
+
+	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
@@ -83,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		spin_lock(&irq->irq_lock);
 
 		/* Always preserve the active bit */
 		irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -126,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 				vgic_irq_set_phys_active(irq, false);
 		}
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
...
@@ -46,7 +46,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	int lr;
-	unsigned long flags;
+
+	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
@@ -75,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 		if (!irq)	/* An LPI could have been unmapped. */
 			continue;
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		spin_lock(&irq->irq_lock);
 
 		/* Always preserve the active bit */
 		irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -118,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 				vgic_irq_set_phys_active(irq, false);
 		}
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
...
@@ -593,10 +593,11 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq, *tmp;
-	unsigned long flags;
+
+	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 retry:
-	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+	spin_lock(&vgic_cpu->ap_list_lock);
 
 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
@@ -637,7 +638,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 		/* This interrupt looks like it has to be migrated. */
 
 		spin_unlock(&irq->irq_lock);
-		spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+		spin_unlock(&vgic_cpu->ap_list_lock);
 
 		/*
 		 * Ensure locking order by always locking the smallest
@@ -651,7 +652,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 			vcpuB = vcpu;
 		}
 
-		spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
 				 SINGLE_DEPTH_NESTING);
 		spin_lock(&irq->irq_lock);
@@ -676,7 +677,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 		spin_unlock(&irq->irq_lock);
 		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 
 		if (target_vcpu_needs_kick) {
 			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -686,7 +687,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 		goto retry;
 	}
 
-	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+	spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
...
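The DEBUG_SPINLOCK_BUG_ON() assertion used above comes from the parent commit (dc961e53), which adds it to the vgic private header. A rough sketch of such a helper is shown below, assuming it simply wraps BUG_ON() behind CONFIG_DEBUG_SPINLOCK so the irqs_disabled() check costs nothing in non-debug builds; the exact definition in the parent commit may differ.

/* Sketch of the helper presumably added to virt/kvm/arm/vgic/vgic.h by
 * the parent commit; compiles away unless spinlock debugging is on. */
#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif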