Commit 54b44ad2 authored by Oliver Upton

Merge branch kvm-arm64/sgi-injection into kvmarm/next

* kvm-arm64/sgi-injection:
  : vSGI injection improvements + fixes, courtesy Marc Zyngier
  :
  : Avoid linearly searching for vSGI targets using a compressed MPIDR to
  : index a cache. While at it, fix some egregious bugs in KVM's mishandling
  : of vcpuid (user-controlled value) and vcpu_idx.
  KVM: arm64: Clarify the ordering requirements for vcpu/RD creation
  KVM: arm64: vgic-v3: Optimize affinity-based SGI injection
  KVM: arm64: Fast-track kvm_mpidr_to_vcpu() when mpidr_data is available
  KVM: arm64: Build MPIDR to vcpu index cache at runtime
  KVM: arm64: Simplify kvm_vcpu_get_mpidr_aff()
  KVM: arm64: Use vcpu_idx for invalidation tracking
  KVM: arm64: vgic: Use vcpu_idx for the debug information
  KVM: arm64: vgic-v2: Use cpuid from userspace as vcpu_id
  KVM: arm64: vgic-v3: Refactor GICv3 SGI generation
  KVM: arm64: vgic-its: Treat the collection target address as a vcpu_id
  KVM: arm64: vgic: Make kvm_vgic_inject_irq() take a vcpu pointer
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parents df26b779 f9940416
@@ -59,6 +59,13 @@ Groups:
It is invalid to mix calls with KVM_VGIC_V3_ADDR_TYPE_REDIST and
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attributes.
+Note that to obtain reproducible results (the same VCPU being associated
+with the same redistributor across a save/restore operation), VCPU creation
+order, redistributor region creation order as well as the respective
+interleaves of VCPU and region creation MUST be preserved. Any change in
+either ordering may result in a different vcpu_id/redistributor association,
+resulting in a VM that will fail to run at restore time.
Errors:
======= =============================================================
......
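To make the ordering requirement above concrete, here is a hypothetical setup sequence; the helpers are invented for illustration (thin wrappers over KVM_CREATE_VCPU and KVM_SET_DEVICE_ATTR), and only the interleave matters, not the attribute encoding:

/* Illustrative sketch only, not kernel or VMM code */
void create_vcpu(int vm_fd, unsigned long vcpu_id);     /* KVM_CREATE_VCPU */
void add_redist_region(int vgic_fd, __u64 region_attr); /* KVM_SET_DEVICE_ATTR */

void build_vm(int vm_fd, int vgic_fd, __u64 region0, __u64 region1)
{
	create_vcpu(vm_fd, 0);
	create_vcpu(vm_fd, 1);
	add_redist_region(vgic_fd, region0);	/* room for two RDs */
	create_vcpu(vm_fd, 2);			/* lands in the next region */
	add_redist_region(vgic_fd, region1);
}

Restoring with, say, all three vCPUs created before either region is registered may associate vCPU 2 with a different redistributor, and the restored VM will refuse to run.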
@@ -470,7 +470,7 @@ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
-return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
+return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
......
@@ -212,6 +212,31 @@ struct kvm_protected_vm {
struct kvm_hyp_memcache teardown_mc;
};
+struct kvm_mpidr_data {
+u64 mpidr_mask;
+DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
+};
+
+static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
+{
+unsigned long mask = data->mpidr_mask;
+u64 aff = mpidr & MPIDR_HWID_BITMASK;
+int nbits, bit, bit_idx = 0;
+u16 index = 0;
+
+/*
+* If this looks like RISC-V's BEXT or x86's PEXT
+* instructions, it isn't by accident.
+*/
+nbits = fls(mask);
+for_each_set_bit(bit, &mask, nbits) {
+index |= (aff & BIT(bit)) >> (bit - bit_idx);
+bit_idx++;
+}
+
+return index;
+}
struct kvm_arch {
struct kvm_s2_mmu mmu;
@@ -253,6 +278,9 @@ struct kvm_arch {
/* VM-wide vCPU feature set */
DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);
+/* MPIDR to vcpu index mapping, optional */
+struct kvm_mpidr_data *mpidr_data;
/*
* VM-wide PMU filter, implemented as a bitmap and big enough for
* up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
......
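For a concrete feel of kvm_mpidr_index() above, a standalone userspace sketch of the same bit-packing (the values and the pext() helper are invented for the example; the kernel variant shifts the selected bits down in place rather than extracting them up, with the same result):

#include <stdio.h>
#include <stdint.h>

/* Pack the bits of @val selected by @mask towards bit 0 (PEXT-style) */
static uint16_t pext(uint64_t val, uint64_t mask)
{
	uint16_t index = 0;
	int bit_idx = 0;

	for (int bit = 0; bit < 64; bit++) {
		if (mask & (1ULL << bit))
			index |= ((val >> bit) & 1) << bit_idx++;
	}
	return index;
}

int main(void)
{
	/* Four vCPUs over two clusters: Aff1 in {0,1}, Aff0 in {0,1} */
	uint64_t mpidr[] = { 0x0000, 0x0001, 0x0100, 0x0101 };
	uint64_t aff_set = 0, aff_clr = ~0ULL;

	for (int i = 0; i < 4; i++) {
		aff_set |= mpidr[i];
		aff_clr &= mpidr[i];
	}

	/* Only bits that vary across vCPUs survive: mask == 0x101 here */
	uint64_t mask = aff_set ^ aff_clr;

	for (int i = 0; i < 4; i++)	/* prints dense indices 0,1,2,3 */
		printf("mpidr %#06llx -> index %u\n",
		       (unsigned long long)mpidr[i], pext(mpidr[i], mask));
	return 0;
}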
@@ -458,7 +458,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
timer_ctx->irq.level);
if (!userspace_irqchip(vcpu->kvm)) {
-ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
timer_irq(timer_ctx),
timer_ctx->irq.level,
timer_ctx);
......
@@ -205,6 +205,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (is_protected_kvm_enabled())
pkvm_destroy_hyp_vm(kvm);
+kfree(kvm->arch.mpidr_data);
kvm_destroy_vcpus(kvm);
kvm_unshare_hyp(kvm, kvm + 1);
@@ -437,9 +438,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* We might get preempted before the vCPU actually runs, but
* over-invalidation doesn't affect correctness.
*/
-if (*last_ran != vcpu->vcpu_id) {
+if (*last_ran != vcpu->vcpu_idx) {
kvm_call_hyp(__kvm_flush_cpu_context, mmu);
-*last_ran = vcpu->vcpu_id;
+*last_ran = vcpu->vcpu_idx;
}
vcpu->cpu = cpu;
@@ -577,6 +578,57 @@ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
}
+static void kvm_init_mpidr_data(struct kvm *kvm)
+{
+struct kvm_mpidr_data *data = NULL;
+unsigned long c, mask, nr_entries;
+u64 aff_set = 0, aff_clr = ~0UL;
+struct kvm_vcpu *vcpu;
+
+mutex_lock(&kvm->arch.config_lock);
+
+if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
+goto out;
+
+kvm_for_each_vcpu(c, vcpu, kvm) {
+u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
+aff_set |= aff;
+aff_clr &= aff;
+}
+
+/*
+* A significant bit can be either 0 or 1, and will only appear in
+* aff_set. Use aff_clr to weed out the useless stuff.
+*/
+mask = aff_set ^ aff_clr;
+nr_entries = BIT_ULL(hweight_long(mask));
+
+/*
+* Don't let userspace fool us. If we need more than a single page
+* to describe the compressed MPIDR array, just fall back to the
+* iterative method. Single vcpu VMs do not need this either.
+*/
+if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
+data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries),
+GFP_KERNEL_ACCOUNT);
+
+if (!data)
+goto out;
+
+data->mpidr_mask = mask;
+
+kvm_for_each_vcpu(c, vcpu, kvm) {
+u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
+u16 index = kvm_mpidr_index(data, aff);
+
+data->cmpidr_to_idx[index] = c;
+}
+
+kvm->arch.mpidr_data = data;
+out:
+mutex_unlock(&kvm->arch.config_lock);
+}
/*
* Handle both the initialisation that is being done when the vcpu is
* run for the first time, as well as the updates that must be
@@ -600,6 +652,8 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (likely(vcpu_has_run_once(vcpu)))
return 0;
+kvm_init_mpidr_data(kvm);
+
kvm_arm_vcpu_init_debug(vcpu);
if (likely(irqchip_in_kernel(kvm))) {
@@ -1136,27 +1190,23 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status)
{
u32 irq = irq_level->irq;
-unsigned int irq_type, vcpu_idx, irq_num;
-int nrcpus = atomic_read(&kvm->online_vcpus);
+unsigned int irq_type, vcpu_id, irq_num;
struct kvm_vcpu *vcpu = NULL;
bool level = irq_level->level;
irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
-vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
-vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
+vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
+vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
-trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
+trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
switch (irq_type) {
case KVM_ARM_IRQ_TYPE_CPU:
if (irqchip_in_kernel(kvm))
return -ENXIO;
-if (vcpu_idx >= nrcpus)
-return -EINVAL;
-vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
if (!vcpu)
return -EINVAL;
@@ -1168,17 +1218,14 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (!irqchip_in_kernel(kvm))
return -ENXIO;
-if (vcpu_idx >= nrcpus)
-return -EINVAL;
-vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
if (!vcpu)
return -EINVAL;
if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
-return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
+return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
case KVM_ARM_IRQ_TYPE_SPI:
if (!irqchip_in_kernel(kvm))
return -ENXIO;
@@ -1186,7 +1233,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (irq_num < VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
-return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
+return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
}
return -EINVAL;
@@ -2378,6 +2425,18 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
unsigned long i;
mpidr &= MPIDR_HWID_BITMASK;
+
+if (kvm->arch.mpidr_data) {
+u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
+
+vcpu = kvm_get_vcpu(kvm,
+kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
+if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
+vcpu = NULL;
+
+return vcpu;
+}
+
kvm_for_each_vcpu(i, vcpu, kvm) {
if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
return vcpu;
......
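The kvm_get_vcpu() to kvm_get_vcpu_by_id() conversions in this file (and in the vgic below) all turn on one distinction: vcpu_idx is a position in the creation-ordered vCPU array, while vcpu_id is whatever userspace passed to KVM_CREATE_VCPU, and the two only coincide when IDs are allocated densely from zero. A simplified model of the two lookups (illustrative sketch, not the kvm_host.h implementation):

#define MAX_VCPUS 16

struct vcpu { unsigned long id; };

static struct vcpu *vcpus[MAX_VCPUS];
static unsigned long nr_vcpus;

/* kvm_get_vcpu() analogue: index into the array, no ID validation */
struct vcpu *get_vcpu(unsigned long idx)
{
	return idx < nr_vcpus ? vcpus[idx] : NULL;
}

/* kvm_get_vcpu_by_id() analogue: search for the userspace-chosen ID */
struct vcpu *get_vcpu_by_id(unsigned long id)
{
	for (unsigned long i = 0; i < nr_vcpus; i++) {
		if (vcpus[i]->id == id)
			return vcpus[i];
	}
	return NULL;
}

With vCPUs created as id 0 then id 8, get_vcpu(1) and get_vcpu_by_id(8) name the same vCPU; feeding the user-provided 8 straight into get_vcpu() is exactly the vcpuid/vcpu_idx confusion the branch description calls out.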
@@ -365,7 +365,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
pmu->irq_level = overflow;
if (likely(irqchip_in_kernel(vcpu->kvm))) {
-int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
pmu->irq_num, overflow, pmu);
WARN_ON(ret);
}
......
@@ -166,7 +166,7 @@ static void print_header(struct seq_file *s, struct vgic_irq *irq,
if (vcpu) {
hdr = "VCPU";
-id = vcpu->vcpu_id;
+id = vcpu->vcpu_idx;
}
seq_printf(s, "\n");
@@ -212,7 +212,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
" %2d "
"\n",
type, irq->intid,
-(irq->target_vcpu) ? irq->target_vcpu->vcpu_id : -1,
+(irq->target_vcpu) ? irq->target_vcpu->vcpu_idx : -1,
pending,
irq->line_level,
irq->active,
@@ -224,7 +224,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
irq->mpidr,
irq->source,
irq->priority,
-(irq->vcpu) ? irq->vcpu->vcpu_id : -1);
+(irq->vcpu) ? irq->vcpu->vcpu_idx : -1);
}
static int vgic_debug_show(struct seq_file *s, void *v)
......
@@ -23,7 +23,7 @@ static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e,
if (!vgic_valid_spi(kvm, spi_id))
return -EINVAL;
-return kvm_vgic_inject_irq(kvm, 0, spi_id, level, NULL);
+return kvm_vgic_inject_irq(kvm, NULL, spi_id, level, NULL);
}
/**
......
@@ -378,6 +378,12 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
return ret;
}
+static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
+struct its_collection *col)
+{
+return kvm_get_vcpu_by_id(kvm, col->target_addr);
+}
+
/*
* Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
* is targeting) to the VGIC's view, which deals with target VCPUs.
@@ -391,7 +397,7 @@ static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
if (!its_is_collection_mapped(ite->collection))
return;
-vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
+vcpu = collection_to_vcpu(kvm, ite->collection);
update_affinity(ite->irq, vcpu);
}
@@ -679,7 +685,7 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
if (!ite || !its_is_collection_mapped(ite->collection))
return E_ITS_INT_UNMAPPED_INTERRUPT;
-vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
+vcpu = collection_to_vcpu(kvm, ite->collection);
if (!vcpu)
return E_ITS_INT_UNMAPPED_INTERRUPT;
@@ -887,7 +893,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
return E_ITS_MOVI_UNMAPPED_COLLECTION;
ite->collection = collection;
-vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+vcpu = collection_to_vcpu(kvm, collection);
vgic_its_invalidate_cache(kvm);
@@ -1121,7 +1127,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
}
if (its_is_collection_mapped(collection))
-vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+vcpu = collection_to_vcpu(kvm, collection);
irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
if (IS_ERR(irq)) {
@@ -1242,21 +1248,22 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
u64 *its_cmd)
{
u16 coll_id;
-u32 target_addr;
struct its_collection *collection;
bool valid;
valid = its_cmd_get_validbit(its_cmd);
coll_id = its_cmd_get_collection(its_cmd);
-target_addr = its_cmd_get_target_addr(its_cmd);
-
-if (target_addr >= atomic_read(&kvm->online_vcpus))
-return E_ITS_MAPC_PROCNUM_OOR;
if (!valid) {
vgic_its_free_collection(its, coll_id);
vgic_its_invalidate_cache(kvm);
} else {
+struct kvm_vcpu *vcpu;
+
+vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
+if (!vcpu)
+return E_ITS_MAPC_PROCNUM_OOR;
collection = find_collection(its, coll_id);
if (!collection) {
@@ -1270,9 +1277,9 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
coll_id);
if (ret)
return ret;
-collection->target_addr = target_addr;
+collection->target_addr = vcpu->vcpu_id;
} else {
-collection->target_addr = target_addr;
+collection->target_addr = vcpu->vcpu_id;
update_affinity_collection(kvm, its, collection);
}
}
@@ -1382,7 +1389,7 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
if (!its_is_collection_mapped(collection))
return E_ITS_INVALL_UNMAPPED_COLLECTION;
-vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+vcpu = collection_to_vcpu(kvm, collection);
vgic_its_invall(vcpu);
return 0;
@@ -1399,23 +1406,21 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
u64 *its_cmd)
{
-u32 target1_addr = its_cmd_get_target_addr(its_cmd);
-u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
struct kvm_vcpu *vcpu1, *vcpu2;
struct vgic_irq *irq;
u32 *intids;
int irq_count, i;
-if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
-target2_addr >= atomic_read(&kvm->online_vcpus))
+/* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
+vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
+vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32));
+if (!vcpu1 || !vcpu2)
return E_ITS_MOVALL_PROCNUM_OOR;
-if (target1_addr == target2_addr)
+if (vcpu1 == vcpu2)
return 0;
-vcpu1 = kvm_get_vcpu(kvm, target1_addr);
-vcpu2 = kvm_get_vcpu(kvm, target2_addr);
-
irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
if (irq_count < 0)
return irq_count;
@@ -2258,7 +2263,7 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
return PTR_ERR(ite);
if (its_is_collection_mapped(collection))
-vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);
irq = vgic_add_lpi(kvm, lpi_id, vcpu);
if (IS_ERR(irq)) {
@@ -2573,7 +2578,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
coll_id = val & KVM_ITS_CTE_ICID_MASK;
if (target_addr != COLLECTION_NOT_MAPPED &&
-target_addr >= atomic_read(&kvm->online_vcpus))
+!kvm_get_vcpu_by_id(kvm, target_addr))
return -EINVAL;
collection = find_collection(its, coll_id);
......
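The collection_to_vcpu() helper introduced above encodes a deliberate ABI choice: with GITS_TYPER.PTA==0, the target address in ITS commands is a processor number rather than a redistributor address, and this series pins that number down as the vcpu_id. The invariant, restated as a two-line sketch (it simply mirrors the MAPC store and the lookup paths in the hunks above):

/* store: MAPC records the userspace-visible ID ... */
collection->target_addr = vcpu->vcpu_id;
/* ... resolve: every later lookup goes through the ID, not the index */
vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);

This keeps saved ITS tables stable across save/restore even when vcpu IDs are sparse.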
@@ -340,13 +340,9 @@ int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
{
int cpuid;
-cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
-KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
-if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
-return -EINVAL;
-
-reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
return 0;
......
@@ -1013,35 +1013,6 @@ int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
return 0;
}
-/*
-* Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
-* generation register ICC_SGI1R_EL1) with a given VCPU.
-* If the VCPU's MPIDR matches, return the level0 affinity, otherwise
-* return -1.
-*/
-static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
-{
-unsigned long affinity;
-int level0;
-
-/*
-* Split the current VCPU's MPIDR into affinity level 0 and the
-* rest as this is what we have to compare against.
-*/
-affinity = kvm_vcpu_get_mpidr_aff(vcpu);
-level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
-affinity &= ~MPIDR_LEVEL_MASK;
-
-/* bail out if the upper three levels don't match */
-if (sgi_aff != affinity)
-return -1;
-
-/* Is this VCPU's bit set in the mask ? */
-if (!(sgi_cpu_mask & BIT(level0)))
-return -1;
-
-return level0;
-}
/*
* The ICC_SGI* registers encode the affinity differently from the MPIDR,
@@ -1052,6 +1023,38 @@ static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
+static void vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, u32 sgi, bool allow_group1)
+{
+struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, sgi);
+unsigned long flags;
+
+raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+/*
+* An access targeting Group0 SGIs can only generate
+* those, while an access targeting Group1 SGIs can
+* generate interrupts of either group.
+*/
+if (!irq->group || allow_group1) {
+if (!irq->hw) {
+irq->pending_latch = true;
+vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+} else {
+/* HW SGI? Ask the GIC to inject it */
+int err;
+err = irq_set_irqchip_state(irq->host_irq,
+IRQCHIP_STATE_PENDING,
+true);
+WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+}
+} else {
+raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+}
+
+vgic_put_irq(vcpu->kvm, irq);
+}
/**
* vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
* @vcpu: The VCPU requesting a SGI
@@ -1062,83 +1065,46 @@ static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
* This will trap in sys_regs.c and call this function.
* This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
* target processors as well as a bitmask of 16 Aff0 CPUs.
-* If the interrupt routing mode bit is not set, we iterate over all VCPUs to
-* check for matching ones. If this bit is set, we signal all, but not the
-* calling VCPU.
+*
+* If the interrupt routing mode bit is not set, we iterate over the Aff0
+* bits and signal the VCPUs matching the provided Aff{3,2,1}.
+*
+* If this bit is set, we signal all, but not the calling VCPU.
*/
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *c_vcpu;
-u16 target_cpus;
+unsigned long target_cpus;
u64 mpidr;
-int sgi;
-int vcpu_id = vcpu->vcpu_id;
-bool broadcast;
-unsigned long c, flags;
-
-sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
-broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
-target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
-mpidr = SGI_AFFINITY_LEVEL(reg, 3);
-mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
-mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
-
-/*
-* We iterate over all VCPUs to find the MPIDRs matching the request.
-* If we have handled one CPU, we clear its bit to detect early
-* if we are already finished. This avoids iterating through all
-* VCPUs when most of the times we just signal a single VCPU.
-*/
-kvm_for_each_vcpu(c, c_vcpu, kvm) {
-struct vgic_irq *irq;
-
-/* Exit early if we have dealt with all requested CPUs */
-if (!broadcast && target_cpus == 0)
-break;
-
-/* Don't signal the calling VCPU */
-if (broadcast && c == vcpu_id)
-continue;
-
-if (!broadcast) {
-int level0;
-
-level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
-if (level0 == -1)
-continue;
-
-/* remove this matching VCPU from the mask */
-target_cpus &= ~BIT(level0);
-}
-
-irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
-
-raw_spin_lock_irqsave(&irq->irq_lock, flags);
-
-/*
-* An access targeting Group0 SGIs can only generate
-* those, while an access targeting Group1 SGIs can
-* generate interrupts of either group.
-*/
-if (!irq->group || allow_group1) {
-if (!irq->hw) {
-irq->pending_latch = true;
-vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
-} else {
-/* HW SGI? Ask the GIC to inject it */
-int err;
-err = irq_set_irqchip_state(irq->host_irq,
-IRQCHIP_STATE_PENDING,
-true);
-WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
-raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-}
-} else {
-raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-}
-
-vgic_put_irq(vcpu->kvm, irq);
-}
+u32 sgi, aff0;
+unsigned long c;
+
+sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg);
+
+/* Broadcast */
+if (unlikely(reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT))) {
+kvm_for_each_vcpu(c, c_vcpu, kvm) {
+/* Don't signal the calling VCPU */
+if (c_vcpu == vcpu)
+continue;
+
+vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
+}
+
+return;
+}
+
+/* We iterate over affinities to find the corresponding vcpus */
+mpidr = SGI_AFFINITY_LEVEL(reg, 3);
+mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
+mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
+target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg);
+
+for_each_set_bit(aff0, &target_cpus, hweight_long(ICC_SGI1R_TARGET_LIST_MASK)) {
+c_vcpu = kvm_mpidr_to_vcpu(kvm, mpidr | aff0);
+if (c_vcpu)
+vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
+}
}
......
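The split into vgic_v3_queue_sgi() plus the rewritten vgic_v3_dispatch_sgi() is what lets the mpidr_data cache pay off: a unicast SGI now costs at most 16 lookups, one per set TargetList bit (bits [15:0] of ICC_SGI1R_EL1), instead of a walk over every vCPU. A rough sketch of the unicast path (the aff1/aff2/aff3 helpers are invented here; the kernel extracts the fields with FIELD_GET and SGI_AFFINITY_LEVEL as shown above):

uint64_t mpidr = aff3(reg) | aff2(reg) | aff1(reg);	/* upper affinity levels */
uint16_t targets = reg & 0xffff;			/* TargetList */

for (int aff0 = 0; aff0 < 16; aff0++) {
	if (!(targets & (1u << aff0)))
		continue;
	/* one cache lookup per candidate, O(1) with mpidr_data */
	struct kvm_vcpu *t = kvm_mpidr_to_vcpu(kvm, mpidr | aff0);
	if (t)
		vgic_v3_queue_sgi(t, sgi, allow_group1);
}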
@@ -422,7 +422,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
/**
* kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
* @kvm: The VM structure pointer
-* @cpuid: The CPU for PPIs
+* @vcpu: The CPU for PPIs or NULL for global interrupts
* @intid: The INTID to inject a new state to.
* @level: Edge-triggered: true: to trigger the interrupt
* false: to ignore the call
@@ -436,24 +436,22 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
* level-sensitive interrupts. You can think of the level parameter as 1
* being HIGH and 0 being LOW and all devices being active-HIGH.
*/
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
-bool level, void *owner)
+int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+unsigned int intid, bool level, void *owner)
{
-struct kvm_vcpu *vcpu;
struct vgic_irq *irq;
unsigned long flags;
int ret;
-trace_vgic_update_irq_pending(cpuid, intid, level);
-
ret = vgic_lazy_init(kvm);
if (ret)
return ret;
-vcpu = kvm_get_vcpu(kvm, cpuid);
if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
+trace_vgic_update_irq_pending(vcpu ? vcpu->vcpu_idx : 0, intid, level);
+
irq = vgic_get_irq(kvm, vcpu, intid);
if (!irq)
return -EINVAL;
......
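With the new signature, callers identify the target directly, as the timer, PMU and irqfd conversions in this merge show. Condensed usage, taken from the hunks above:

/* Private interrupt (PPI): pass the target vCPU */
kvm_vgic_inject_irq(vcpu->kvm, vcpu, timer_irq(timer_ctx), level, timer_ctx);

/* Shared interrupt (SPI): no per-vCPU routing, pass NULL */
kvm_vgic_inject_irq(kvm, NULL, spi_id, level, NULL);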
@@ -375,8 +375,8 @@ int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
-bool level, void *owner);
+int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+unsigned int intid, bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
u32 vintid, struct irq_ops *ops);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
......