Commit 1d6f83f6 authored by Oliver Upton

KVM: arm64: vgic: Store LPIs in an xarray

Using a linked-list for LPIs is less than ideal as it of course requires
iterative searches to find a particular entry. An xarray is a better
data structure for this use case, as it provides faster searches and can
still handle a potentially sparse range of INTID allocations.
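
As a rough illustration of the difference (a hypothetical sketch, not the vgic code; `example_irq` and both lookup helpers are invented names): a list lookup has to walk every registered LPI, while an xarray indexes directly by INTID and copes well with sparse ranges, since LPI INTIDs start at 8192 and need not be contiguous.

```c
#include <linux/list.h>
#include <linux/types.h>
#include <linux/xarray.h>

/* Hypothetical stand-in for struct vgic_irq, for illustration only. */
struct example_irq {
	struct list_head lpi_list;
	u32 intid;
};

/* Linked list: O(n), every lookup walks the nodes until the INTID matches. */
static struct example_irq *lookup_list(struct list_head *head, u32 intid)
{
	struct example_irq *irq;

	list_for_each_entry(irq, head, lpi_list) {
		if (irq->intid == intid)
			return irq;
	}

	return NULL;
}

/* xarray: indexed directly by INTID, efficient even for sparse allocations. */
static struct example_irq *lookup_xa(struct xarray *xa, u32 intid)
{
	return xa_load(xa, intid);
}
```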

Start by storing LPIs in an xarray, punting usage of the xarray to a
subsequent change. The observant among you will notice that we added yet
another lock to the chain of locking order rules; document the ordering
of the xa_lock. Don't worry, we'll get rid of the lpi_list_lock one
day...
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240221054253.3848076-2-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 6613476e
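
Before the diff, a minimal sketch of the reserve-then-store pattern the change applies (the helper `insert_lpi` is an invented name): the xarray slot is reserved with a sleeping allocation while sleeping is still allowed, so the later `xa_store()` under the IRQ-disabled raw spinlock can pass a 0 gfp mask and never needs to allocate. `XA_FLAGS_LOCK_IRQ` makes the xarray's internal `xa_lock` IRQ-safe, which is what the new line in the lock-ordering comment documents.

```c
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

/*
 * Hypothetical helper showing the two-phase insertion: reserve the slot
 * up front (may sleep), then store into it with IRQs disabled (may not).
 */
static int insert_lpi(struct xarray *xa, raw_spinlock_t *list_lock,
		      unsigned long intid, void *entry)
{
	unsigned long flags;
	int ret;

	/* Sleeping allocation is fine here; IRQs are still enabled. */
	ret = xa_reserve_irq(xa, intid, GFP_KERNEL_ACCOUNT);
	if (ret)
		return ret;

	raw_spin_lock_irqsave(list_lock, flags);

	/* gfp mask of 0: the reserved slot guarantees no allocation. */
	ret = xa_err(xa_store(xa, intid, entry, 0));
	if (ret)
		xa_release(xa, intid);	/* drop the unused reservation */

	raw_spin_unlock_irqrestore(list_lock, flags);

	return ret;
}
```

On failure after reservation, `xa_release()` returns the reserved slot so the xarray holds no stale entry; the diff below follows the same shape inside `vgic_add_lpi()`.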
arch/arm64/kvm/vgic/vgic-init.c
@@ -56,6 +56,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
 	INIT_LIST_HEAD(&dist->lpi_list_head);
 	INIT_LIST_HEAD(&dist->lpi_translation_cache);
 	raw_spin_lock_init(&dist->lpi_list_lock);
+	xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
 }
 
 /* CREATION */
@@ -366,6 +367,8 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 
 	if (vgic_supports_direct_msis(kvm))
 		vgic_v4_teardown(kvm);
+
+	xa_destroy(&dist->lpi_xa);
 }
 
 static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
arch/arm64/kvm/vgic/vgic-its.c
@@ -52,6 +52,12 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	if (!irq)
 		return ERR_PTR(-ENOMEM);
 
+	ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
+	if (ret) {
+		kfree(irq);
+		return ERR_PTR(ret);
+	}
+
 	INIT_LIST_HEAD(&irq->lpi_list);
 	INIT_LIST_HEAD(&irq->ap_list);
 	raw_spin_lock_init(&irq->irq_lock);
@@ -86,12 +92,22 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 		goto out_unlock;
 	}
 
+	ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
+	if (ret) {
+		xa_release(&dist->lpi_xa, intid);
+		kfree(irq);
+		goto out_unlock;
+	}
+
 	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
 	dist->lpi_list_count++;
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
+	if (ret)
+		return ERR_PTR(ret);
+
 	/*
 	 * We "cache" the configuration table entries in our struct vgic_irq's.
 	 * However we only have those structs for mapped IRQs, so we read in
arch/arm64/kvm/vgic/vgic.c
@@ -30,7 +30,8 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  *   its->its_lock (mutex)
  *     vgic_cpu->ap_list_lock		must be taken with IRQs disabled
  *       kvm->lpi_list_lock		must be taken with IRQs disabled
- *         vgic_irq->irq_lock		must be taken with IRQs disabled
+ *         vgic_dist->lpi_xa.xa_lock	must be taken with IRQs disabled
+ *           vgic_irq->irq_lock	must be taken with IRQs disabled
  *
  * As the ap_list_lock might be taken from the timer interrupt handler,
  * we have to disable IRQs before taking this lock and everything lower
@@ -131,6 +132,7 @@ void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
 		return;
 
 	list_del(&irq->lpi_list);
+	xa_erase(&dist->lpi_xa, irq->intid);
 	dist->lpi_list_count--;
 
 	kfree(irq);
include/kvm/arm_vgic.h
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
+#include <linux/xarray.h>
 #include <kvm/iodev.h>
 #include <linux/list.h>
 #include <linux/jump_label.h>
@@ -275,6 +276,7 @@ struct vgic_dist {
 	/* Protects the lpi_list and the count value below. */
 	raw_spinlock_t		lpi_list_lock;
+	struct xarray		lpi_xa;
 	struct list_head	lpi_list_head;
 	int			lpi_list_count;