Commit ad7937dc authored by Marc Zyngier

Merge branch kvm-arm64/selftest/irq-injection into kvmarm-master/next

* kvm-arm64/selftest/irq-injection:
  : .
  : New tests from Ricardo Koller:
  : "This series adds a new test, aarch64/vgic-irq, that validates the injection of
  : different types of IRQs from userspace using various methods and configurations"
  : .
  KVM: selftests: aarch64: Add test for restoring active IRQs
  KVM: selftests: aarch64: Add ISPENDR write tests in vgic_irq
  KVM: selftests: aarch64: Add tests for IRQFD in vgic_irq
  KVM: selftests: Add IRQ GSI routing library functions
  KVM: selftests: aarch64: Add test_inject_fail to vgic_irq
  KVM: selftests: aarch64: Add tests for LEVEL_INFO in vgic_irq
  KVM: selftests: aarch64: Level-sensitive interrupts tests in vgic_irq
  KVM: selftests: aarch64: Add preemption tests in vgic_irq
  KVM: selftests: aarch64: Cmdline arg to set EOI mode in vgic_irq
  KVM: selftests: aarch64: Cmdline arg to set number of IRQs in vgic_irq test
  KVM: selftests: aarch64: Abstract the injection functions in vgic_irq
  KVM: selftests: aarch64: Add vgic_irq to test userspace IRQ injection
  KVM: selftests: aarch64: Add vGIC library functions to deal with vIRQ state
  KVM: selftests: Add kvm_irq_line library function
  KVM: selftests: aarch64: Add GICv3 register accessor library functions
  KVM: selftests: aarch64: Add function for accessing GICv3 dist and redist registers
  KVM: selftests: aarch64: Move gic_v3.h to shared headers
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 089606c0 728fcc46
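
For orientation before the diff: the host-side injection paths exercised by the new test boil down to KVM_IRQ_LINE, vGIC device-attribute writes (ISPENDR/ISACTIVER/LEVEL_INFO), and GSI routing paired with KVM_IRQFD. The sketch below is not part of the series; it only illustrates how the library calls declared further down (in vgic.h) fit together, assuming a VM and vGICv3 already created with the selftest library's usual helpers.

/* Illustrative sketch only (not from this series): inject SPI 35 three ways. */
#include "kvm_util.h"
#include "vgic.h"

static void demo_injection_paths(struct kvm_vm *vm, int gic_fd)
{
        /* 1) Assert and deassert the line through KVM_IRQ_LINE. */
        kvm_arm_irq_line(vm, 35, 1);
        kvm_arm_irq_line(vm, 35, 0);

        /* 2) Mark the interrupt pending directly via the distributor's ISPENDR. */
        kvm_irq_write_ispendr(gic_fd, 35, 0 /* vcpu, only used for private intids */);

        /* 3) Raise the line level via KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO
         *    (level-sensitive interrupts only).
         */
        kvm_irq_set_level_info(gic_fd, 35, 1);
}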
@@ -4,6 +4,7 @@
 /aarch64/get-reg-list
 /aarch64/psci_cpu_on_test
 /aarch64/vgic_init
+/aarch64/vgic_irq
 /s390x/memop
 /s390x/resets
 /s390x/sync_regs_test
......
...@@ -94,6 +94,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions ...@@ -94,6 +94,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
TEST_GEN_PROGS_aarch64 += demand_paging_test TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
......
@@ -382,7 +382,7 @@ static struct kvm_vm *test_vm_create(void)
 	ucall_init(vm, NULL);
 	test_init_timer_irq(vm);
-	vgic_v3_setup(vm, nr_vcpus, GICD_BASE_GPA, GICR_BASE_GPA);
+	vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
 
 	/* Make all the test's cmdline args visible to the guest */
 	sync_global_to_guest(vm, test_args);
......
This diff is collapsed.
@@ -11,11 +11,37 @@ enum gic_type {
 	GIC_TYPE_MAX,
 };
 
+#define MIN_SGI			0
+#define MIN_PPI			16
+#define MIN_SPI			32
+#define MAX_SPI			1019
+#define IAR_SPURIOUS		1023
+
+#define INTID_IS_SGI(intid)	(0 <= (intid) && (intid) < MIN_PPI)
+#define INTID_IS_PPI(intid)	(MIN_PPI <= (intid) && (intid) < MIN_SPI)
+#define INTID_IS_SPI(intid)	(MIN_SPI <= (intid) && (intid) <= MAX_SPI)
+
 void gic_init(enum gic_type type, unsigned int nr_cpus,
 		void *dist_base, void *redist_base);
 void gic_irq_enable(unsigned int intid);
 void gic_irq_disable(unsigned int intid);
 unsigned int gic_get_and_ack_irq(void);
 void gic_set_eoi(unsigned int intid);
+void gic_set_dir(unsigned int intid);
+
+/*
+ * Sets the EOI mode. When split is false, EOIR both drops the priority and
+ * deactivates the interrupt; when split is true, EOIR only drops the
+ * priority and DIR performs the deactivation.
+ */
+void gic_set_eoi_split(bool split);
+void gic_set_priority_mask(uint64_t mask);
+void gic_set_priority(uint32_t intid, uint32_t prio);
+void gic_irq_set_active(unsigned int intid);
+void gic_irq_clear_active(unsigned int intid);
+bool gic_irq_get_active(unsigned int intid);
+void gic_irq_set_pending(unsigned int intid);
+void gic_irq_clear_pending(unsigned int intid);
+bool gic_irq_get_pending(unsigned int intid);
+void gic_irq_set_config(unsigned int intid, bool is_edge);
 
 #endif /* SELFTEST_KVM_GIC_H */
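
A hedged guest-side sketch (not from this series) of how these accessors are meant to be combined: configure an SPI, then take one interrupt in split-EOI mode. The exception-handler wiring (struct ex_regs, handler registration) is assumed to come from the selftests' aarch64 processor.h, and ICC_PMR_DEF_PRIO from gic_v3.h.

#include "gic.h"
#include "gic_v3.h"      /* ICC_PMR_DEF_PRIO */
#include "kvm_util.h"    /* GUEST_ASSERT */
#include "processor.h"   /* struct ex_regs */

static void guest_setup_spi(unsigned int intid)
{
        gic_set_eoi_split(true);                /* EOIR drops priority, DIR deactivates */
        gic_set_priority_mask(ICC_PMR_DEF_PRIO);
        gic_set_priority(intid, 0);
        gic_irq_set_config(intid, false);       /* level-sensitive */
        gic_irq_enable(intid);
}

static void guest_irq_handler(struct ex_regs *regs)
{
        unsigned int intid = gic_get_and_ack_irq();

        if (intid == IAR_SPURIOUS)
                return;

        GUEST_ASSERT(gic_irq_get_active(intid));
        gic_set_eoi(intid);     /* priority drop only, since EOI is split */
        gic_set_dir(intid);     /* explicit deactivation */
}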
@@ -16,8 +16,12 @@
 #define GICD_IGROUPR			0x0080
 #define GICD_ISENABLER			0x0100
 #define GICD_ICENABLER			0x0180
+#define GICD_ISPENDR			0x0200
+#define GICD_ICPENDR			0x0280
 #define GICD_ICACTIVER			0x0380
+#define GICD_ISACTIVER			0x0300
 #define GICD_IPRIORITYR			0x0400
+#define GICD_ICFGR			0x0C00
 
 /*
  * The assumption is that the guest runs in a non-secure mode.
@@ -49,16 +53,24 @@
 #define GICR_IGROUPR0			GICD_IGROUPR
 #define GICR_ISENABLER0			GICD_ISENABLER
 #define GICR_ICENABLER0			GICD_ICENABLER
+#define GICR_ISPENDR0			GICD_ISPENDR
+#define GICR_ISACTIVER0			GICD_ISACTIVER
 #define GICR_ICACTIVER0			GICD_ICACTIVER
+#define GICR_ICENABLER			GICD_ICENABLER
+#define GICR_ICACTIVER			GICD_ICACTIVER
 #define GICR_IPRIORITYR0		GICD_IPRIORITYR
 
 /* CPU interface registers */
 #define SYS_ICC_PMR_EL1			sys_reg(3, 0, 4, 6, 0)
 #define SYS_ICC_IAR1_EL1		sys_reg(3, 0, 12, 12, 0)
 #define SYS_ICC_EOIR1_EL1		sys_reg(3, 0, 12, 12, 1)
+#define SYS_ICC_DIR_EL1			sys_reg(3, 0, 12, 11, 1)
+#define SYS_ICC_CTLR_EL1		sys_reg(3, 0, 12, 12, 4)
 #define SYS_ICC_SRE_EL1			sys_reg(3, 0, 12, 12, 5)
 #define SYS_ICC_GRPEN1_EL1		sys_reg(3, 0, 12, 12, 7)
 
+#define SYS_ICV_AP1R0_EL1		sys_reg(3, 0, 12, 9, 0)
+
 #define ICC_PMR_DEF_PRIO		0xf0
 
 #define ICC_SRE_EL1_SRE			(1U << 0)
@@ -14,7 +14,21 @@
 		((uint64_t)(flags) << 12) | \
 		index)
 
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
 		uint64_t gicd_base_gpa, uint64_t gicr_base_gpa);
 
-#endif /* SELFTEST_KVM_VGIC_H */
+#define VGIC_MAX_RESERVED	1023
+
+void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
+int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
+
+void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
+int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
+
+/* The vcpu arg only applies to private interrupts. */
+void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu);
+void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu);
+
+#define KVM_IRQCHIP_NUM_PINS	(1020 - 32)
+
+#endif // SELFTEST_KVM_VGIC_H
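
vgic_v3_setup() now also takes the number of interrupt IDs to configure on the vGIC device; it is written to KVM_DEV_ARM_VGIC_GRP_NR_IRQS before the device is initialized, as the vgic.c hunk further down shows. A minimal call sketch; the base addresses are illustrative assumptions, not values from the series.

/* Illustrative sketch: a vGICv3 sized for 64 interrupt IDs (32 private + 32 SPIs). */
#define TEST_GICD_BASE_GPA      0x08000000ULL   /* example addresses */
#define TEST_GICR_BASE_GPA      0x080a0000ULL

static int setup_test_vgic(struct kvm_vm *vm, unsigned int nr_vcpus)
{
        return vgic_v3_setup(vm, nr_vcpus, 64,
                             TEST_GICD_BASE_GPA, TEST_GICR_BASE_GPA);
}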
@@ -248,6 +248,8 @@ int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
 		       void *val, bool write);
 int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
 		      void *val, bool write);
+void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
+int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
 
 int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
 			  uint64_t attr);
@@ -258,6 +260,14 @@ int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
 int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
 			    uint64_t attr, void *val, bool write);
 
+#define KVM_MAX_IRQ_ROUTES		4096
+
+struct kvm_irq_routing *kvm_gsi_routing_create(void);
+void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
+		uint32_t gsi, uint32_t pin);
+int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
+void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
+
 const char *exit_reason_str(unsigned int exit_reason);
 
 void virt_pgd_alloc(struct kvm_vm *vm);
......
@@ -93,3 +93,69 @@ void gic_set_eoi(unsigned int intid)
 	GUEST_ASSERT(gic_common_ops);
 	gic_common_ops->gic_write_eoir(intid);
 }
+
+void gic_set_dir(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_write_dir(intid);
+}
+
+void gic_set_eoi_split(bool split)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_set_eoi_split(split);
+}
+
+void gic_set_priority_mask(uint64_t pmr)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_set_priority_mask(pmr);
+}
+
+void gic_set_priority(unsigned int intid, unsigned int prio)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_set_priority(intid, prio);
+}
+
+void gic_irq_set_active(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_set_active(intid);
+}
+
+void gic_irq_clear_active(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_clear_active(intid);
+}
+
+bool gic_irq_get_active(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	return gic_common_ops->gic_irq_get_active(intid);
+}
+
+void gic_irq_set_pending(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_set_pending(intid);
+}
+
+void gic_irq_clear_pending(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_clear_pending(intid);
+}
+
+bool gic_irq_get_pending(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	return gic_common_ops->gic_irq_get_pending(intid);
+}
+
+void gic_irq_set_config(unsigned int intid, bool is_edge)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_set_config(intid, is_edge);
+}
@@ -14,6 +14,17 @@ struct gic_common_ops {
 	void (*gic_irq_disable)(unsigned int intid);
 	uint64_t (*gic_read_iar)(void);
 	void (*gic_write_eoir)(uint32_t irq);
+	void (*gic_write_dir)(uint32_t irq);
+	void (*gic_set_eoi_split)(bool split);
+	void (*gic_set_priority_mask)(uint64_t mask);
+	void (*gic_set_priority)(uint32_t intid, uint32_t prio);
+	void (*gic_irq_set_active)(uint32_t intid);
+	void (*gic_irq_clear_active)(uint32_t intid);
+	bool (*gic_irq_get_active)(uint32_t intid);
+	void (*gic_irq_set_pending)(uint32_t intid);
+	void (*gic_irq_clear_pending)(uint32_t intid);
+	bool (*gic_irq_get_pending)(uint32_t intid);
+	void (*gic_irq_set_config)(uint32_t intid, bool is_edge);
 };
 
 extern const struct gic_common_ops gicv3_ops;
......
@@ -20,6 +20,7 @@ struct gicv3_data {
 };
 
 #define sgi_base_from_redist(redist_base)	(redist_base + SZ_64K)
+#define DIST_BIT				(1U << 31)
 
 enum gicv3_intid_range {
 	SGI_RANGE,
@@ -50,6 +51,14 @@ static void gicv3_gicr_wait_for_rwp(void *redist_base)
 	}
 }
 
+static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
+{
+	if (cpu_or_dist & DIST_BIT)
+		gicv3_gicd_wait_for_rwp();
+	else
+		gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]);
+}
+
 static enum gicv3_intid_range get_intid_range(unsigned int intid)
 {
 	switch (intid) {
@@ -81,39 +90,175 @@ static void gicv3_write_eoir(uint32_t irq)
 	isb();
 }
 
-static void
-gicv3_config_irq(unsigned int intid, unsigned int offset)
-{
-	uint32_t cpu = guest_get_vcpuid();
-	uint32_t mask = 1 << (intid % 32);
-	enum gicv3_intid_range intid_range = get_intid_range(intid);
-	void *reg;
-
-	/* We care about 'cpu' only for SGIs or PPIs */
-	if (intid_range == SGI_RANGE || intid_range == PPI_RANGE) {
-		GUEST_ASSERT(cpu < gicv3_data.nr_cpus);
-
-		reg = sgi_base_from_redist(gicv3_data.redist_base[cpu]) +
-			offset;
-		writel(mask, reg);
-		gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu]);
-	} else if (intid_range == SPI_RANGE) {
-		reg = gicv3_data.dist_base + offset + (intid / 32) * 4;
-		writel(mask, reg);
-		gicv3_gicd_wait_for_rwp();
-	} else {
-		GUEST_ASSERT(0);
-	}
-}
-
-static void gicv3_irq_enable(unsigned int intid)
-{
-	gicv3_config_irq(intid, GICD_ISENABLER);
-}
-
-static void gicv3_irq_disable(unsigned int intid)
-{
-	gicv3_config_irq(intid, GICD_ICENABLER);
-}
+static void gicv3_write_dir(uint32_t irq)
+{
+	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
+	isb();
+}
+
+static void gicv3_set_priority_mask(uint64_t mask)
+{
+	write_sysreg_s(mask, SYS_ICC_PMR_EL1);
+}
+
+static void gicv3_set_eoi_split(bool split)
+{
+	uint32_t val;
+
+	/* All other fields are read-only, so no need to read CTLR first. In
+	 * fact, the kernel does the same.
+	 */
+	val = split ? (1U << 1) : 0;
+	write_sysreg_s(val, SYS_ICC_CTLR_EL1);
+	isb();
+}
+
+uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
+{
+	void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
+		: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
+	return readl(base + offset);
+}
+
+void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
+{
+	void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
+		: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
+	writel(reg_val, base + offset);
+}
+
+uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
+{
+	return gicv3_reg_readl(cpu_or_dist, offset) & mask;
+}
+
+void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
+		uint32_t mask, uint32_t reg_val)
+{
+	uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
+
+	tmp |= (reg_val & mask);
+	gicv3_reg_writel(cpu_or_dist, offset, tmp);
+}
+
+/*
+ * We use a single offset for the distributor and redistributor maps as they
+ * have the same value in both. The only exceptions are registers that only
+ * exist in one and not the other, like GICR_WAKER that doesn't exist in the
+ * distributor map. Such registers are conveniently marked as reserved in the
+ * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
+ * marked as "Reserved" in the Distributor map.
+ */
+static void gicv3_access_reg(uint32_t intid, uint64_t offset,
+		uint32_t reg_bits, uint32_t bits_per_field,
+		bool write, uint32_t *val)
+{
+	uint32_t cpu = guest_get_vcpuid();
+	enum gicv3_intid_range intid_range = get_intid_range(intid);
+	uint32_t fields_per_reg, index, mask, shift;
+	uint32_t cpu_or_dist;
+
+	GUEST_ASSERT(bits_per_field <= reg_bits);
+	GUEST_ASSERT(*val < (1U << bits_per_field));
+	/* Some registers like IROUTER are 64 bit long. Those are currently not
+	 * supported by readl nor writel, so just asserting here until then.
+	 */
+	GUEST_ASSERT(reg_bits == 32);
+
+	fields_per_reg = reg_bits / bits_per_field;
+	index = intid % fields_per_reg;
+	shift = index * bits_per_field;
+	mask = ((1U << bits_per_field) - 1) << shift;
+
+	/* Set offset to the actual register holding intid's config. */
+	offset += (intid / fields_per_reg) * (reg_bits / 8);
+
+	cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;
+
+	if (write)
+		gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
+	*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
+}
+
+static void gicv3_write_reg(uint32_t intid, uint64_t offset,
+		uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
+{
+	gicv3_access_reg(intid, offset, reg_bits,
+			bits_per_field, true, &val);
+}
+
+static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
+		uint32_t reg_bits, uint32_t bits_per_field)
+{
+	uint32_t val;
+
+	gicv3_access_reg(intid, offset, reg_bits,
+			bits_per_field, false, &val);
+	return val;
+}
+
+static void gicv3_set_priority(uint32_t intid, uint32_t prio)
+{
+	gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
+}
+
+/* Sets the intid to be level-sensitive or edge-triggered. */
+static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
+{
+	uint32_t val;
+
+	/* N/A for private interrupts. */
+	GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
+
+	val = is_edge ? 2 : 0;
+	gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
+}
+
+static void gicv3_irq_enable(uint32_t intid)
+{
+	bool is_spi = get_intid_range(intid) == SPI_RANGE;
+	uint32_t cpu = guest_get_vcpuid();
+
+	gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
+	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
+}
+
+static void gicv3_irq_disable(uint32_t intid)
+{
+	bool is_spi = get_intid_range(intid) == SPI_RANGE;
+	uint32_t cpu = guest_get_vcpuid();
+
+	gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
+	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
+}
+
+static void gicv3_irq_set_active(uint32_t intid)
+{
+	gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
+}
+
+static void gicv3_irq_clear_active(uint32_t intid)
+{
+	gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
+}
+
+static bool gicv3_irq_get_active(uint32_t intid)
+{
+	return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
+}
+
+static void gicv3_irq_set_pending(uint32_t intid)
+{
+	gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
+}
+
+static void gicv3_irq_clear_pending(uint32_t intid)
+{
+	gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
+}
+
+static bool gicv3_irq_get_pending(uint32_t intid)
+{
+	return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
+}
 
 static void gicv3_enable_redist(void *redist_base)
@@ -237,4 +382,15 @@ const struct gic_common_ops gicv3_ops = {
 	.gic_irq_disable = gicv3_irq_disable,
 	.gic_read_iar = gicv3_read_iar,
 	.gic_write_eoir = gicv3_write_eoir,
+	.gic_write_dir = gicv3_write_dir,
+	.gic_set_priority_mask = gicv3_set_priority_mask,
+	.gic_set_eoi_split = gicv3_set_eoi_split,
+	.gic_set_priority = gicv3_set_priority,
+	.gic_irq_set_active = gicv3_irq_set_active,
+	.gic_irq_clear_active = gicv3_irq_clear_active,
+	.gic_irq_get_active = gicv3_irq_get_active,
+	.gic_irq_set_pending = gicv3_irq_set_pending,
+	.gic_irq_clear_pending = gicv3_irq_clear_pending,
+	.gic_irq_get_pending = gicv3_irq_get_pending,
+	.gic_irq_set_config = gicv3_irq_set_config,
 };
}; };
@@ -5,11 +5,14 @@
 
 #include <linux/kvm.h>
 #include <linux/sizes.h>
+#include <asm/kvm_para.h>
 #include <asm/kvm.h>
 
 #include "kvm_util.h"
 #include "../kvm_util_internal.h"
 #include "vgic.h"
+#include "gic.h"
+#include "gic_v3.h"
 
 /*
  * vGIC-v3 default host setup
@@ -28,7 +31,7 @@
  * redistributor regions of the guest. Since it depends on the number of
  * vCPUs for the VM, it must be called after all the vCPUs have been created.
  */
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
 		uint64_t gicd_base_gpa, uint64_t gicr_base_gpa)
 {
 	int gic_fd;
@@ -50,6 +53,13 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
 
 	/* Distributor setup */
 	gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+
+	kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
+			0, &nr_irqs, true);
+
+	kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+			KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+
 	kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
 			KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa, true);
 	nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE);
@@ -68,3 +78,94 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
 
 	return gic_fd;
 }
+
+/* should only work for level sensitive interrupts */
+int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
+{
+	uint64_t attr = 32 * (intid / 32);
+	uint64_t index = intid % 32;
+	uint64_t val;
+	int ret;
+
+	ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
+				 attr, &val, false);
+	if (ret != 0)
+		return ret;
+
+	val |= 1U << index;
+	ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
+				 attr, &val, true);
+	return ret;
+}
+
+void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
+{
+	int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
+
+	TEST_ASSERT(ret == 0, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO failed, "
+			"rc: %i errno: %i", ret, errno);
+}
+
+int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
+{
+	uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK;
+
+	TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself "
+		"doesn't allow injecting SGIs. There's no mask for it.");
+
+	if (INTID_IS_PPI(intid))
+		irq |= KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT;
+	else
+		irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT;
+
+	return _kvm_irq_line(vm, irq, level);
+}
+
+void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
+{
+	int ret = _kvm_arm_irq_line(vm, intid, level);
+
+	TEST_ASSERT(ret == 0, "KVM_IRQ_LINE failed, rc: %i errno: %i",
+			ret, errno);
+}
+
+static void vgic_poke_irq(int gic_fd, uint32_t intid,
+		uint32_t vcpu, uint64_t reg_off)
+{
+	uint64_t reg = intid / 32;
+	uint64_t index = intid % 32;
+	uint64_t attr = reg_off + reg * 4;
+	uint64_t val;
+	bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);
+
+	/* Check that the addr part of the attr is within 32 bits. */
+	assert(attr <= KVM_DEV_ARM_VGIC_OFFSET_MASK);
+
+	uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
+					  : KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
+
+	if (intid_is_private) {
+		/* TODO: only vcpu 0 implemented for now. */
+		assert(vcpu == 0);
+		attr += SZ_64K;
+	}
+
+	/* All calls will succeed, even with invalid intid's, as long as the
+	 * addr part of the attr is within 32 bits (checked above). An invalid
+	 * intid will just make the read/writes point to above the intended
+	 * register space (i.e., ICPENDR after ISPENDR).
+	 */
+	kvm_device_access(gic_fd, group, attr, &val, false);
+	val |= 1ULL << index;
+	kvm_device_access(gic_fd, group, attr, &val, true);
+}
+
+void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu)
+{
+	vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
+}
+
+void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu)
+{
+	vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
+}
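
Similarly, a worked example (illustration only) of the device attribute built by vgic_poke_irq() above:

/*
 * kvm_irq_write_ispendr(gic_fd, 35, 0) ends up in
 * vgic_poke_irq(gic_fd, 35, 0, GICD_ISPENDR):
 *
 *   reg   = 35 / 32 = 1
 *   attr  = 0x0200 + 1 * 4 = 0x0204   (ISPENDR1, KVM_DEV_ARM_VGIC_GRP_DIST_REGS)
 *   index = 35 % 32 = 3               (bit 3 is set in the read-back value)
 *
 * For a private intid such as PPI 27, the group becomes
 * KVM_DEV_ARM_VGIC_GRP_REDIST_REGS and SZ_64K is added to the attr so the
 * access lands in the redistributor's SGI frame.
 */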
@@ -2108,6 +2108,78 @@ int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
 	return ret;
 }
 
+/*
+ * IRQ related functions.
+ */
+
+int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
+{
+	struct kvm_irq_level irq_level = {
+		.irq    = irq,
+		.level  = level,
+	};
+
+	return _vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
+}
+
+void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
+{
+	int ret = _kvm_irq_line(vm, irq, level);
+
+	TEST_ASSERT(ret >= 0, "KVM_IRQ_LINE failed, rc: %i errno: %i", ret, errno);
+}
+
+struct kvm_irq_routing *kvm_gsi_routing_create(void)
+{
+	struct kvm_irq_routing *routing;
+	size_t size;
+
+	size = sizeof(struct kvm_irq_routing);
+	/* Allocate space for the max number of entries: this wastes 196 KBs. */
+	size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
+	routing = calloc(1, size);
+	assert(routing);
+
+	return routing;
+}
+
+void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
+		uint32_t gsi, uint32_t pin)
+{
+	int i;
+
+	assert(routing);
+	assert(routing->nr < KVM_MAX_IRQ_ROUTES);
+
+	i = routing->nr;
+	routing->entries[i].gsi = gsi;
+	routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
+	routing->entries[i].flags = 0;
+	routing->entries[i].u.irqchip.irqchip = 0;
+	routing->entries[i].u.irqchip.pin = pin;
+	routing->nr++;
+}
+
+int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
+{
+	int ret;
+
+	assert(routing);
+	ret = ioctl(vm_get_fd(vm), KVM_SET_GSI_ROUTING, routing);
+	free(routing);
+
+	return ret;
+}
+
+void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
+{
+	int ret;
+
+	ret = _kvm_gsi_routing_write(vm, routing);
+	TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING failed, rc: %i errno: %i",
+				ret, errno);
+}
+
 /*
  * VM Dump
  *
......
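
The GSI routing helpers above are typically paired with KVM_IRQFD: a routing entry binds a GSI to an irqchip pin (the arm64 vGIC maps pin N to SPI 32 + N), and an eventfd attached to that GSI then injects the SPI when written to. A hedged sketch, not taken from the series; the raw KVM_IRQFD ioctl is used because no wrapper for it appears in these hunks.

#include <errno.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include "kvm_util.h"
#include "test_util.h"

/* Illustrative sketch: route GSI 10 to irqchip pin 10 (SPI 42) and attach an
 * eventfd; writing 8 bytes to the returned fd injects the interrupt.
 */
static int setup_irqfd_for_spi42(struct kvm_vm *vm)
{
        struct kvm_irq_routing *routing = kvm_gsi_routing_create();
        struct kvm_irqfd irqfd = {
                .fd  = eventfd(0, 0),
                .gsi = 10,
        };

        kvm_gsi_routing_irqchip_add(routing, 10, 10);
        kvm_gsi_routing_write(vm, routing);     /* consumes and frees 'routing' */

        TEST_ASSERT(ioctl(vm_get_fd(vm), KVM_IRQFD, &irqfd) == 0,
                    "KVM_IRQFD failed, errno: %i", errno);
        return irqfd.fd;
}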