Commit ad7937dc authored by Marc Zyngier

Merge branch kvm-arm64/selftest/irq-injection into kvmarm-master/next

* kvm-arm64/selftest/irq-injection:
  : .
  : New tests from Ricardo Koller:
  : "This series adds a new test, aarch64/vgic-irq, that validates the injection of
  : different types of IRQs from userspace using various methods and configurations"
  : .
  KVM: selftests: aarch64: Add test for restoring active IRQs
  KVM: selftests: aarch64: Add ISPENDR write tests in vgic_irq
  KVM: selftests: aarch64: Add tests for IRQFD in vgic_irq
  KVM: selftests: Add IRQ GSI routing library functions
  KVM: selftests: aarch64: Add test_inject_fail to vgic_irq
  KVM: selftests: aarch64: Add tests for LEVEL_INFO in vgic_irq
  KVM: selftests: aarch64: Level-sensitive interrupts tests in vgic_irq
  KVM: selftests: aarch64: Add preemption tests in vgic_irq
  KVM: selftests: aarch64: Cmdline arg to set EOI mode in vgic_irq
  KVM: selftests: aarch64: Cmdline arg to set number of IRQs in vgic_irq test
  KVM: selftests: aarch64: Abstract the injection functions in vgic_irq
  KVM: selftests: aarch64: Add vgic_irq to test userspace IRQ injection
  KVM: selftests: aarch64: Add vGIC library functions to deal with vIRQ state
  KVM: selftests: Add kvm_irq_line library function
  KVM: selftests: aarch64: Add GICv3 register accessor library functions
  KVM: selftests: aarch64: Add function for accessing GICv3 dist and redist registers
  KVM: selftests: aarch64: Move gic_v3.h to shared headers
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 089606c0 728fcc46
@@ -4,6 +4,7 @@
/aarch64/get-reg-list
/aarch64/psci_cpu_on_test
/aarch64/vgic_init
/aarch64/vgic_irq
/s390x/memop
/s390x/resets
/s390x/sync_regs_test
......
@@ -94,6 +94,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
......
@@ -382,7 +382,7 @@ static struct kvm_vm *test_vm_create(void)
ucall_init(vm, NULL);
test_init_timer_irq(vm);
vgic_v3_setup(vm, nr_vcpus, GICD_BASE_GPA, GICR_BASE_GPA);
vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
/* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args);
......
// SPDX-License-Identifier: GPL-2.0
/*
* vgic_irq.c - Test userspace injection of IRQs
*
* This test validates the injection of IRQs from userspace using various
* methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
* host to inject a specific intid via a GUEST_SYNC call, and then checks that
* it received it.
*/
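/*
 * A sketch of the round trip described above (all names are the ones defined
 * later in this file):
 *
 *   guest                            host (test_vgic() run loop)
 *   -----                            ----------------------------
 *   kvm_inject_call(cmd, ...)
 *     GUEST_SYNC(&args)   ------->   get_ucall() == UCALL_SYNC
 *                                    kvm_inject_get_call()
 *                                    run_guest_cmd(), e.g., KVM_IRQ_LINE
 *   IRQ handler runs      <-------   KVM delivers the interrupt
 *   guest checks irqnr_received[intid]
 */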
#include <asm/kvm.h>
#include <asm/kvm_para.h>
#include <sys/eventfd.h>
#include <linux/sizes.h>
#include "processor.h"
#include "test_util.h"
#include "kvm_util.h"
#include "gic.h"
#include "gic_v3.h"
#include "vgic.h"
#define GICD_BASE_GPA 0x08000000ULL
#define GICR_BASE_GPA 0x080A0000ULL
#define VCPU_ID 0
/*
* Stores the user specified args; it's passed to the guest and to every test
* function.
*/
struct test_args {
uint32_t nr_irqs; /* number of KVM supported IRQs. */
bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
bool level_sensitive; /* 1 is level, 0 is edge */
int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
};
/*
* KVM implements 32 priority levels:
* 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
*
* Note that these macros will still be correct in the case that KVM implements
* more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
*/
#define KVM_NUM_PRIOS 32
#define KVM_PRIO_SHIFT 3 /* steps of 8 = 1 << 3 */
#define KVM_PRIO_STEPS (1 << KVM_PRIO_SHIFT) /* 8 */
#define LOWEST_PRIO (KVM_NUM_PRIOS - 1)
#define CPU_PRIO_MASK (LOWEST_PRIO << KVM_PRIO_SHIFT) /* 0xf8 */
#define IRQ_DEFAULT_PRIO (LOWEST_PRIO - 1)
#define IRQ_DEFAULT_PRIO_REG (IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
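/*
 * Worked example of the encoding above: an IRQ left at the default priority
 * has IRQ_DEFAULT_PRIO = 30, programmed as IRQ_DEFAULT_PRIO_REG =
 * 30 << 3 = 0xf0. With the priority mask set to CPU_PRIO_MASK =
 * 31 << 3 = 0xf8, any priority numerically below 0xf8 (i.e., higher
 * priority) can fire, so 0xf0 passes.
 */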
static void *dist = (void *)GICD_BASE_GPA;
static void *redist = (void *)GICR_BASE_GPA;
/*
* The kvm_inject_* utilities are used by the guest to ask the host to inject
* interrupts (e.g., using the KVM_IRQ_LINE ioctl).
*/
typedef enum {
KVM_INJECT_EDGE_IRQ_LINE = 1,
KVM_SET_IRQ_LINE,
KVM_SET_IRQ_LINE_HIGH,
KVM_SET_LEVEL_INFO_HIGH,
KVM_INJECT_IRQFD,
KVM_WRITE_ISPENDR,
KVM_WRITE_ISACTIVER,
} kvm_inject_cmd;
struct kvm_inject_args {
kvm_inject_cmd cmd;
uint32_t first_intid;
uint32_t num;
int level;
bool expect_failure;
};
/* Used on the guest side to perform the hypercall. */
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
uint32_t num, int level, bool expect_failure);
/* Used on the host side to get the hypercall info. */
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
struct kvm_inject_args *args);
#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure) \
kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)
#define KVM_INJECT_MULTI(cmd, intid, num) \
_KVM_INJECT_MULTI(cmd, intid, num, false)
#define _KVM_INJECT(cmd, intid, expect_failure) \
_KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)
#define KVM_INJECT(cmd, intid) \
_KVM_INJECT_MULTI(cmd, intid, 1, false)
#define KVM_ACTIVATE(cmd, intid) \
kvm_inject_call(cmd, intid, 1, 1, false);
struct kvm_inject_desc {
kvm_inject_cmd cmd;
/* can inject SGIs, PPIs, and/or SPIs. */
bool sgi, ppi, spi;
};
static struct kvm_inject_desc inject_edge_fns[] = {
/* sgi ppi spi */
{ KVM_INJECT_EDGE_IRQ_LINE, false, false, true },
{ KVM_INJECT_IRQFD, false, false, true },
{ KVM_WRITE_ISPENDR, true, false, true },
{ 0, },
};
static struct kvm_inject_desc inject_level_fns[] = {
/* sgi ppi spi */
{ KVM_SET_IRQ_LINE_HIGH, false, true, true },
{ KVM_SET_LEVEL_INFO_HIGH, false, true, true },
{ KVM_INJECT_IRQFD, false, false, true },
{ KVM_WRITE_ISPENDR, false, true, true },
{ 0, },
};
static struct kvm_inject_desc set_active_fns[] = {
/* sgi ppi spi */
{ KVM_WRITE_ISACTIVER, true, true, true },
{ 0, },
};
#define for_each_inject_fn(t, f) \
for ((f) = (t); (f)->cmd; (f)++)
#define for_each_supported_inject_fn(args, t, f) \
for_each_inject_fn(t, f) \
if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)
#define for_each_supported_activate_fn(args, t, f) \
for_each_supported_inject_fn((args), (t), (f))
/* Shared between the guest main thread and the IRQ handlers. */
volatile uint64_t irq_handled;
volatile uint32_t irqnr_received[MAX_SPI + 1];
static void reset_stats(void)
{
int i;
irq_handled = 0;
for (i = 0; i <= MAX_SPI; i++)
irqnr_received[i] = 0;
}
static uint64_t gic_read_ap1r0(void)
{
uint64_t reg = read_sysreg_s(SYS_ICV_AP1R0_EL1);
dsb(sy);
return reg;
}
static void gic_write_ap1r0(uint64_t val)
{
write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
isb();
}
static void guest_set_irq_line(uint32_t intid, uint32_t level);
static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
{
uint32_t intid = gic_get_and_ack_irq();
if (intid == IAR_SPURIOUS)
return;
GUEST_ASSERT(gic_irq_get_active(intid));
if (!level_sensitive)
GUEST_ASSERT(!gic_irq_get_pending(intid));
if (level_sensitive)
guest_set_irq_line(intid, 0);
GUEST_ASSERT(intid < MAX_SPI);
irqnr_received[intid] += 1;
irq_handled += 1;
gic_set_eoi(intid);
GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
if (eoi_split)
gic_set_dir(intid);
GUEST_ASSERT(!gic_irq_get_active(intid));
GUEST_ASSERT(!gic_irq_get_pending(intid));
}
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
uint32_t num, int level, bool expect_failure)
{
struct kvm_inject_args args = {
.cmd = cmd,
.first_intid = first_intid,
.num = num,
.level = level,
.expect_failure = expect_failure,
};
GUEST_SYNC(&args);
}
#define GUEST_ASSERT_IAR_EMPTY() \
do { \
uint32_t _intid; \
_intid = gic_get_and_ack_irq(); \
GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \
} while (0)
#define CAT_HELPER(a, b) a ## b
#define CAT(a, b) CAT_HELPER(a, b)
#define PREFIX guest_irq_handler_
#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
#define GENERATE_GUEST_IRQ_HANDLER(split, lev) \
static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs) \
{ \
guest_irq_generic_handler(split, lev); \
}
GENERATE_GUEST_IRQ_HANDLER(0, 0);
GENERATE_GUEST_IRQ_HANDLER(0, 1);
GENERATE_GUEST_IRQ_HANDLER(1, 0);
GENERATE_GUEST_IRQ_HANDLER(1, 1);
static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
{GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
{GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
};
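/*
 * For instance, GENERATE_GUEST_IRQ_HANDLER(0, 1) expands to:
 *
 *   static void guest_irq_handler_01(struct ex_regs *regs)
 *   {
 *           guest_irq_generic_handler(0, 1);
 *   }
 *
 * which the table above exposes as guest_irq_handlers[0][1]; test_vgic()
 * indexes it as [eoi_split][level_sensitive].
 */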
static void reset_priorities(struct test_args *args)
{
int i;
for (i = 0; i < args->nr_irqs; i++)
gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
}
static void guest_set_irq_line(uint32_t intid, uint32_t level)
{
kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
}
static void test_inject_fail(struct test_args *args,
uint32_t intid, kvm_inject_cmd cmd)
{
reset_stats();
_KVM_INJECT(cmd, intid, true);
/* no IRQ to handle on entry */
GUEST_ASSERT_EQ(irq_handled, 0);
GUEST_ASSERT_IAR_EMPTY();
}
static void guest_inject(struct test_args *args,
uint32_t first_intid, uint32_t num,
kvm_inject_cmd cmd)
{
uint32_t i;
reset_stats();
/* Cycle over all priorities to make things more interesting. */
for (i = first_intid; i < num + first_intid; i++)
gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);
asm volatile("msr daifset, #2" : : : "memory");
KVM_INJECT_MULTI(cmd, first_intid, num);
while (irq_handled < num) {
asm volatile("wfi\n"
"msr daifclr, #2\n"
/* handle IRQ */
"msr daifset, #2\n"
: : : "memory");
}
asm volatile("msr daifclr, #2" : : : "memory");
GUEST_ASSERT_EQ(irq_handled, num);
for (i = first_intid; i < num + first_intid; i++)
GUEST_ASSERT_EQ(irqnr_received[i], 1);
GUEST_ASSERT_IAR_EMPTY();
reset_priorities(args);
}
/*
* Restore the active state of multiple concurrent IRQs (given by num).
* This does what a live migration would do on the
* destination side assuming there are some active IRQs that were not
* deactivated yet.
*/
static void guest_restore_active(struct test_args *args,
uint32_t first_intid, uint32_t num,
kvm_inject_cmd cmd)
{
uint32_t prio, intid, ap1r;
int i;
/* Set the priorities of the num IRQs
* in descending order, so intid+1 can preempt intid.
*/
for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
GUEST_ASSERT(prio >= 0);
intid = i + first_intid;
gic_set_priority(intid, prio);
}
/* In a real migration, KVM would restore all GIC state before running
* guest code.
*/
for (i = 0; i < num; i++) {
intid = i + first_intid;
KVM_ACTIVATE(cmd, intid);
ap1r = gic_read_ap1r0();
ap1r |= 1U << i;
gic_write_ap1r0(ap1r);
}
/* This is where the "migration" would occur. */
/* finish handling the IRQs starting with the highest priority one. */
for (i = 0; i < num; i++) {
intid = num - i - 1 + first_intid;
gic_set_eoi(intid);
if (args->eoi_split)
gic_set_dir(intid);
}
for (i = 0; i < num; i++)
GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
GUEST_ASSERT_IAR_EMPTY();
}
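/*
 * To illustrate the activation loop above: with num = 4, AP1R0 ends up as
 * 0b1111 (four nested active priorities), and the EOI loop then unwinds it
 * back to 0, which the final gic_read_ap1r0() assertion checks.
 */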
/*
* Polls the IAR until it's not a spurious interrupt.
*
* This function should only be used in test_inject_preemption (with IRQs
* masked).
*/
static uint32_t wait_for_and_activate_irq(void)
{
uint32_t intid;
do {
asm volatile("wfi" : : : "memory");
intid = gic_get_and_ack_irq();
} while (intid == IAR_SPURIOUS);
return intid;
}
/*
* Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
* handle them without handling the actual exceptions. This is done by masking
* interrupts for the whole test.
*/
static void test_inject_preemption(struct test_args *args,
uint32_t first_intid, int num,
kvm_inject_cmd cmd)
{
uint32_t intid, prio, step = KVM_PRIO_STEPS;
int i;
/* Set the priorities of the num IRQs
* in descending order, so intid+1 can preempt intid.
*/
for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
GUEST_ASSERT(prio >= 0);
intid = i + first_intid;
gic_set_priority(intid, prio);
}
local_irq_disable();
for (i = 0; i < num; i++) {
uint32_t tmp;
intid = i + first_intid;
KVM_INJECT(cmd, intid);
/* Each successive IRQ will preempt the previous one. */
tmp = wait_for_and_activate_irq();
GUEST_ASSERT_EQ(tmp, intid);
if (args->level_sensitive)
guest_set_irq_line(intid, 0);
}
/* finish handling the IRQs starting with the highest priority one. */
for (i = 0; i < num; i++) {
intid = num - i - 1 + first_intid;
gic_set_eoi(intid);
if (args->eoi_split)
gic_set_dir(intid);
}
local_irq_enable();
for (i = 0; i < num; i++)
GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
GUEST_ASSERT_IAR_EMPTY();
reset_priorities(args);
}
static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
{
uint32_t nr_irqs = args->nr_irqs;
if (f->sgi) {
guest_inject(args, MIN_SGI, 1, f->cmd);
guest_inject(args, 0, 16, f->cmd);
}
if (f->ppi)
guest_inject(args, MIN_PPI, 1, f->cmd);
if (f->spi) {
guest_inject(args, MIN_SPI, 1, f->cmd);
guest_inject(args, nr_irqs - 1, 1, f->cmd);
guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
}
}
static void test_injection_failure(struct test_args *args,
struct kvm_inject_desc *f)
{
uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
int i;
for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
test_inject_fail(args, bad_intid[i], f->cmd);
}
static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
{
/*
* Test up to 4 levels of preemption. The reason is that KVM doesn't
* currently implement the ability to have more than the number-of-LRs
* number of concurrently active IRQs. The number of LRs implemented is
* IMPLEMENTATION DEFINED, however, it seems that most implement 4.
*/
if (f->sgi)
test_inject_preemption(args, MIN_SGI, 4, f->cmd);
if (f->ppi)
test_inject_preemption(args, MIN_PPI, 4, f->cmd);
if (f->spi)
test_inject_preemption(args, MIN_SPI, 4, f->cmd);
}
static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
{
/* Test up to 4 active IRQs. Same reason as in test_preemption. */
if (f->sgi)
guest_restore_active(args, MIN_SGI, 4, f->cmd);
if (f->ppi)
guest_restore_active(args, MIN_PPI, 4, f->cmd);
if (f->spi)
guest_restore_active(args, MIN_SPI, 4, f->cmd);
}
static void guest_code(struct test_args args)
{
uint32_t i, nr_irqs = args.nr_irqs;
bool level_sensitive = args.level_sensitive;
struct kvm_inject_desc *f, *inject_fns;
gic_init(GIC_V3, 1, dist, redist);
for (i = 0; i < nr_irqs; i++)
gic_irq_enable(i);
for (i = MIN_SPI; i < nr_irqs; i++)
gic_irq_set_config(i, !args.level_sensitive);
gic_set_eoi_split(args.eoi_split);
reset_priorities(&args);
gic_set_priority_mask(CPU_PRIO_MASK);
inject_fns = level_sensitive ? inject_level_fns
: inject_edge_fns;
local_irq_enable();
/* Start the tests. */
for_each_supported_inject_fn(&args, inject_fns, f) {
test_injection(&args, f);
test_preemption(&args, f);
test_injection_failure(&args, f);
}
/* Restore the active state of IRQs. This would happen when live
* migrating IRQs in the middle of being handled.
*/
for_each_supported_activate_fn(&args, set_active_fns, f)
test_restore_active(&args, f);
GUEST_DONE();
}
static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
struct test_args *test_args, bool expect_failure)
{
int ret;
if (!expect_failure) {
kvm_arm_irq_line(vm, intid, level);
} else {
/* The interface doesn't allow larger intids. */
if (intid > KVM_ARM_IRQ_NUM_MASK)
return;
ret = _kvm_arm_irq_line(vm, intid, level);
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Bad intid %i did not cause KVM_IRQ_LINE "
"error: rc: %i errno: %i", intid, ret, errno);
}
}
void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
bool expect_failure)
{
if (!expect_failure) {
kvm_irq_set_level_info(gic_fd, intid, level);
} else {
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
/*
* The kernel silently fails for invalid SPIs and SGIs (which
* are not level-sensitive). It only checks that the intid does not
* spill over 1U << 10 (the max reserved SPI). Also, callers
* are supposed to mask the intid with 0x3ff (1023).
*/
if (intid > VGIC_MAX_RESERVED)
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
"error: rc: %i errno: %i", intid, ret, errno);
else
TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
"for intid %i failed, rc: %i errno: %i",
intid, ret, errno);
}
}
static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
bool expect_failure)
{
struct kvm_irq_routing *routing;
int ret;
uint64_t i;
assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);
routing = kvm_gsi_routing_create();
for (i = intid; i < (uint64_t)intid + num; i++)
kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);
if (!expect_failure) {
kvm_gsi_routing_write(vm, routing);
} else {
ret = _kvm_gsi_routing_write(vm, routing);
/* The kernel only checks for KVM_IRQCHIP_NUM_PINS. */
if (intid >= KVM_IRQCHIP_NUM_PINS)
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
"error: rc: %i errno: %i", intid, ret, errno);
else
TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
"for intid %i failed, rc: %i errno: %i",
intid, ret, errno);
}
}
static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
uint32_t vcpu, bool expect_failure)
{
/*
* Ignore this when expecting failure as invalid intids will lead to
* either trying to inject SGIs when we configured the test to be
* level_sensitive (or the reverse), or injecting large intids, which
* would lead to writing above the ISPENDR register space (and we
* don't want to do that either).
*/
if (!expect_failure)
kvm_irq_write_ispendr(gic_fd, intid, vcpu);
}
static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
bool expect_failure)
{
int fd[MAX_SPI];
uint64_t val;
int ret, f;
uint64_t i;
/*
* There is no way to try injecting an SGI or PPI as the interface
* starts counting from the first SPI (above the private ones), so just
* exit.
*/
if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
return;
kvm_set_gsi_routing_irqchip_check(vm, intid, num,
kvm_max_routes, expect_failure);
/*
* If expect_failure, then just try to inject anyway. These
* will silently fail. And in any case, the guest will check
* that no actual interrupt was injected for those cases.
*/
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
fd[f] = eventfd(0, 0);
TEST_ASSERT(fd[f] != -1,
"eventfd failed, errno: %i\n", errno);
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
struct kvm_irqfd irqfd = {
.fd = fd[f],
.gsi = i - MIN_SPI,
};
assert(i <= (uint64_t)UINT_MAX);
vm_ioctl(vm, KVM_IRQFD, &irqfd);
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
val = 1;
ret = write(fd[f], &val, sizeof(uint64_t));
TEST_ASSERT(ret == sizeof(uint64_t),
"Write to KVM_IRQFD failed with ret: %d\n", ret);
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
close(fd[f]);
}
/* handles the valid case: intid=0xffffffff num=1 */
#define for_each_intid(first, num, tmp, i) \
for ((tmp) = (i) = (first); \
(tmp) < (uint64_t)(first) + (uint64_t)(num); \
(tmp)++, (i)++)
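/*
 * Example of why the bound is computed in 64 bits: with first = 0xffffffff
 * and num = 1, the condition compares 0xffffffff < 0x100000000, so the body
 * runs exactly once instead of wrapping around at 32 bits.
 */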
static void run_guest_cmd(struct kvm_vm *vm, int gic_fd,
struct kvm_inject_args *inject_args,
struct test_args *test_args)
{
kvm_inject_cmd cmd = inject_args->cmd;
uint32_t intid = inject_args->first_intid;
uint32_t num = inject_args->num;
int level = inject_args->level;
bool expect_failure = inject_args->expect_failure;
uint64_t tmp;
uint32_t i;
/* handles the valid case: intid=0xffffffff num=1 */
assert(intid < UINT_MAX - num || num == 1);
switch (cmd) {
case KVM_INJECT_EDGE_IRQ_LINE:
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, 1, test_args,
expect_failure);
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, 0, test_args,
expect_failure);
break;
case KVM_SET_IRQ_LINE:
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, level, test_args,
expect_failure);
break;
case KVM_SET_IRQ_LINE_HIGH:
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, 1, test_args,
expect_failure);
break;
case KVM_SET_LEVEL_INFO_HIGH:
for_each_intid(intid, num, tmp, i)
kvm_irq_set_level_info_check(gic_fd, i, 1,
expect_failure);
break;
case KVM_INJECT_IRQFD:
kvm_routing_and_irqfd_check(vm, intid, num,
test_args->kvm_max_routes,
expect_failure);
break;
case KVM_WRITE_ISPENDR:
for (i = intid; i < intid + num; i++)
kvm_irq_write_ispendr_check(gic_fd, i,
VCPU_ID, expect_failure);
break;
case KVM_WRITE_ISACTIVER:
for (i = intid; i < intid + num; i++)
kvm_irq_write_isactiver(gic_fd, i, VCPU_ID);
break;
default:
break;
}
}
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
struct kvm_inject_args *args)
{
struct kvm_inject_args *kvm_args_hva;
vm_vaddr_t kvm_args_gva;
kvm_args_gva = uc->args[1];
kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
}
static void print_args(struct test_args *args)
{
printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
args->nr_irqs, args->level_sensitive,
args->eoi_split);
}
static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
{
struct ucall uc;
int gic_fd;
struct kvm_vm *vm;
struct kvm_inject_args inject_args;
struct test_args args = {
.nr_irqs = nr_irqs,
.level_sensitive = level_sensitive,
.eoi_split = eoi_split,
.kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
.kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
};
print_args(&args);
vm = vm_create_default(VCPU_ID, 0, guest_code);
ucall_init(vm, NULL);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
/* Setup the guest args page (so it gets the args). */
vcpu_args_set(vm, 0, 1, args);
gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
GICD_BASE_GPA, GICR_BASE_GPA);
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
guest_irq_handlers[args.eoi_split][args.level_sensitive]);
while (1) {
vcpu_run(vm, VCPU_ID);
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_SYNC:
kvm_inject_get_call(vm, &uc, &inject_args);
run_guest_cmd(vm, gic_fd, &inject_args, &args);
break;
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
(const char *)uc.args[0],
__FILE__, uc.args[1], uc.args[2], uc.args[3]);
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
done:
close(gic_fd);
kvm_vm_free(vm);
}
static void help(const char *name)
{
printf(
"\n"
"usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
printf(" -n: specify number of IRQs to setup the vgic with. "
"It has to be a multiple of 32 and between 64 and 1024.\n");
printf(" -e: if 1 then EOI is split into a write to DIR on top "
"of writing EOI.\n");
printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
puts("");
exit(1);
}
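/*
 * Example invocations (matching the getopt() string below):
 *
 *   ./vgic_irq                  run all four eoi_split/level combinations
 *   ./vgic_irq -n 256           same, with a 256-IRQ vgic
 *   ./vgic_irq -n 64 -e 1 -l 1  single run: level-sensitive, split EOI
 */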
int main(int argc, char **argv)
{
uint32_t nr_irqs = 64;
bool default_args = true;
bool level_sensitive = false;
int opt;
bool eoi_split = false;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
switch (opt) {
case 'n':
nr_irqs = atoi(optarg);
if (nr_irqs > 1024 || nr_irqs % 32)
help(argv[0]);
break;
case 'e':
eoi_split = (bool)atoi(optarg);
default_args = false;
break;
case 'l':
level_sensitive = (bool)atoi(optarg);
default_args = false;
break;
case 'h':
default:
help(argv[0]);
break;
}
}
/* If the user just specified nr_irqs (or nothing at all), then run all
* eoi_split/level_sensitive combinations.
*/
if (default_args) {
test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
} else {
test_vgic(nr_irqs, level_sensitive, eoi_split);
}
return 0;
}
@@ -11,11 +11,37 @@ enum gic_type {
GIC_TYPE_MAX,
};
#define MIN_SGI 0
#define MIN_PPI 16
#define MIN_SPI 32
#define MAX_SPI 1019
#define IAR_SPURIOUS 1023
#define INTID_IS_SGI(intid) (0 <= (intid) && (intid) < MIN_PPI)
#define INTID_IS_PPI(intid) (MIN_PPI <= (intid) && (intid) < MIN_SPI)
#define INTID_IS_SPI(intid) (MIN_SPI <= (intid) && (intid) <= MAX_SPI)
void gic_init(enum gic_type type, unsigned int nr_cpus,
void *dist_base, void *redist_base);
void gic_irq_enable(unsigned int intid);
void gic_irq_disable(unsigned int intid);
unsigned int gic_get_and_ack_irq(void);
void gic_set_eoi(unsigned int intid);
void gic_set_dir(unsigned int intid);
/*
* Sets the EOI mode. When split is false, EOI just drops the priority. When
* split is true, EOI drops the priority and deactivates the interrupt.
*/
void gic_set_eoi_split(bool split);
void gic_set_priority_mask(uint64_t mask);
void gic_set_priority(uint32_t intid, uint32_t prio);
void gic_irq_set_active(unsigned int intid);
void gic_irq_clear_active(unsigned int intid);
bool gic_irq_get_active(unsigned int intid);
void gic_irq_set_pending(unsigned int intid);
void gic_irq_clear_pending(unsigned int intid);
bool gic_irq_get_pending(unsigned int intid);
void gic_irq_set_config(unsigned int intid, bool is_edge);
#endif /* SELFTEST_KVM_GIC_H */
@@ -16,8 +16,12 @@
#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
#define GICD_ICPENDR 0x0280
#define GICD_ICACTIVER 0x0380
#define GICD_ISACTIVER 0x0300
#define GICD_IPRIORITYR 0x0400
#define GICD_ICFGR 0x0C00
/*
* The assumption is that the guest runs in a non-secure mode.
@@ -49,16 +53,24 @@
#define GICR_IGROUPR0 GICD_IGROUPR
#define GICR_ISENABLER0 GICD_ISENABLER
#define GICR_ICENABLER0 GICD_ICENABLER
#define GICR_ISPENDR0 GICD_ISPENDR
#define GICR_ISACTIVER0 GICD_ISACTIVER
#define GICR_ICACTIVER0 GICD_ICACTIVER
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
/* CPU interface registers */
#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
#define SYS_ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
#define SYS_ICV_AP1R0_EL1 sys_reg(3, 0, 12, 9, 0)
#define ICC_PMR_DEF_PRIO 0xf0
#define ICC_SRE_EL1_SRE (1U << 0)
......
@@ -14,7 +14,21 @@
((uint64_t)(flags) << 12) | \
index)
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
uint64_t gicd_base_gpa, uint64_t gicr_base_gpa);
#define VGIC_MAX_RESERVED 1023
void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
/* The vcpu arg only applies to private interrupts. */
void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu);
void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu);
#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
#endif // SELFTEST_KVM_VGIC_H
@@ -248,6 +248,8 @@ int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
void *val, bool write);
int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
void *val, bool write);
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
uint64_t attr);
@@ -258,6 +260,14 @@ int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
uint64_t attr, void *val, bool write);
#define KVM_MAX_IRQ_ROUTES 4096
struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
const char *exit_reason_str(unsigned int exit_reason);
void virt_pgd_alloc(struct kvm_vm *vm);
......
@@ -93,3 +93,69 @@ void gic_set_eoi(unsigned int intid)
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_write_eoir(intid);
}
void gic_set_dir(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_write_dir(intid);
}
void gic_set_eoi_split(bool split)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_eoi_split(split);
}
void gic_set_priority_mask(uint64_t pmr)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority_mask(pmr);
}
void gic_set_priority(unsigned int intid, unsigned int prio)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority(intid, prio);
}
void gic_irq_set_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_active(intid);
}
void gic_irq_clear_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_clear_active(intid);
}
bool gic_irq_get_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
return gic_common_ops->gic_irq_get_active(intid);
}
void gic_irq_set_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_pending(intid);
}
void gic_irq_clear_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_clear_pending(intid);
}
bool gic_irq_get_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
return gic_common_ops->gic_irq_get_pending(intid);
}
void gic_irq_set_config(unsigned int intid, bool is_edge)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_config(intid, is_edge);
}
@@ -14,6 +14,17 @@ struct gic_common_ops {
void (*gic_irq_disable)(unsigned int intid);
uint64_t (*gic_read_iar)(void);
void (*gic_write_eoir)(uint32_t irq);
void (*gic_write_dir)(uint32_t irq);
void (*gic_set_eoi_split)(bool split);
void (*gic_set_priority_mask)(uint64_t mask);
void (*gic_set_priority)(uint32_t intid, uint32_t prio);
void (*gic_irq_set_active)(uint32_t intid);
void (*gic_irq_clear_active)(uint32_t intid);
bool (*gic_irq_get_active)(uint32_t intid);
void (*gic_irq_set_pending)(uint32_t intid);
void (*gic_irq_clear_pending)(uint32_t intid);
bool (*gic_irq_get_pending)(uint32_t intid);
void (*gic_irq_set_config)(uint32_t intid, bool is_edge);
};
extern const struct gic_common_ops gicv3_ops;
......
@@ -19,7 +19,8 @@ struct gicv3_data {
unsigned int nr_spis;
};
#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K)
#define DIST_BIT (1U << 31)
enum gicv3_intid_range {
SGI_RANGE,
@@ -50,6 +51,14 @@ static void gicv3_gicr_wait_for_rwp(void *redist_base)
}
}
static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
{
if (cpu_or_dist & DIST_BIT)
gicv3_gicd_wait_for_rwp();
else
gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]);
}
static enum gicv3_intid_range get_intid_range(unsigned int intid)
{
switch (intid) {
@@ -81,39 +90,175 @@ static void gicv3_write_eoir(uint32_t irq)
isb();
}
static void gicv3_write_dir(uint32_t irq)
{
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
}
static void gicv3_set_priority_mask(uint64_t mask)
{
write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}
static void gicv3_set_eoi_split(bool split)
{
uint32_t val;
/* All other fields are read-only, so no need to read CTLR first. In
* fact, the kernel does the same.
*/
val = split ? (1U << 1) : 0;
write_sysreg_s(val, SYS_ICC_CTLR_EL1);
isb();
}
uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
{
void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
return readl(base + offset);
}
void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
{
void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
writel(reg_val, base + offset);
}
uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
{
return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}
void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
uint32_t mask, uint32_t reg_val)
{
uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
tmp |= (reg_val & mask);
gicv3_reg_writel(cpu_or_dist, offset, tmp);
}
/*
* We use a single offset for the distributor and redistributor maps as they
* have the same value in both. The only exceptions are registers that only
* exist in one and not the other, like GICR_WAKER that doesn't exist in the
* distributor map. Such registers are conveniently marked as reserved in the
* map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
* marked as "Reserved" in the Distributor map.
*/
static void gicv3_access_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field,
bool write, uint32_t *val)
{
uint32_t cpu = guest_get_vcpuid();
enum gicv3_intid_range intid_range = get_intid_range(intid);
uint32_t fields_per_reg, index, mask, shift;
uint32_t cpu_or_dist;
GUEST_ASSERT(bits_per_field <= reg_bits);
GUEST_ASSERT(*val < (1U << bits_per_field));
/* Some registers like IROUTER are 64 bits long. Those are currently not
* supported by readl or writel, so just assert here until then.
*/
GUEST_ASSERT(reg_bits == 32);
fields_per_reg = reg_bits / bits_per_field;
index = intid % fields_per_reg;
shift = index * bits_per_field;
mask = ((1U << bits_per_field) - 1) << shift;
/* Set offset to the actual register holding intid's config. */
offset += (intid / fields_per_reg) * (reg_bits / 8);
cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;
if (write)
gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}
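/*
 * Worked example of the math above: gicv3_set_priority(42, prio) lands here
 * with offset = GICD_IPRIORITYR (0x400), reg_bits = 32, bits_per_field = 8.
 * Then fields_per_reg = 4, index = 42 % 4 = 2, shift = 16, mask = 0xff0000,
 * and offset becomes 0x400 + (42 / 4) * 4 = 0x428. Intid 42 is an SPI, so
 * cpu_or_dist = DIST_BIT and the access goes to the distributor.
 */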
static void gicv3_write_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
{
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, true, &val);
}
static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field)
{
uint32_t val;
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, false, &val);
return val;
}
static void gicv3_set_priority(uint32_t intid, uint32_t prio)
{
gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}
/* Sets the intid to be level-sensitive or edge-triggered. */
static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
{
uint32_t val;
/* N/A for private interrupts. */
GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
val = is_edge ? 2 : 0;
gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
}
static void gicv3_irq_enable(uint32_t intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
uint32_t cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
static void gicv3_irq_disable(uint32_t intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
uint32_t cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
static void gicv3_irq_set_active(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}
static void gicv3_irq_clear_active(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}
static bool gicv3_irq_get_active(uint32_t intid)
{
return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}
static void gicv3_irq_set_pending(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}
static void gicv3_irq_clear_pending(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}
static bool gicv3_irq_get_pending(uint32_t intid)
{
return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}
static void gicv3_enable_redist(void *redist_base)
@@ -237,4 +382,15 @@ const struct gic_common_ops gicv3_ops = {
.gic_irq_disable = gicv3_irq_disable,
.gic_read_iar = gicv3_read_iar,
.gic_write_eoir = gicv3_write_eoir,
.gic_write_dir = gicv3_write_dir,
.gic_set_priority_mask = gicv3_set_priority_mask,
.gic_set_eoi_split = gicv3_set_eoi_split,
.gic_set_priority = gicv3_set_priority,
.gic_irq_set_active = gicv3_irq_set_active,
.gic_irq_clear_active = gicv3_irq_clear_active,
.gic_irq_get_active = gicv3_irq_get_active,
.gic_irq_set_pending = gicv3_irq_set_pending,
.gic_irq_clear_pending = gicv3_irq_clear_pending,
.gic_irq_get_pending = gicv3_irq_get_pending,
.gic_irq_set_config = gicv3_irq_set_config,
};
@@ -5,11 +5,14 @@
#include <linux/kvm.h>
#include <linux/sizes.h>
#include <asm/kvm_para.h>
#include <asm/kvm.h>
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "vgic.h"
#include "gic.h"
#include "gic_v3.h"
/*
* vGIC-v3 default host setup
@@ -28,7 +31,7 @@
* redistributor regions of the guest. Since it depends on the number of
* vCPUs for the VM, it must be called after all the vCPUs have been created.
*/
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
uint64_t gicd_base_gpa, uint64_t gicr_base_gpa)
{
int gic_fd;
@@ -50,6 +53,13 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
/* Distributor setup */
gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
0, &nr_irqs, true);
kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa, true);
nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE);
@@ -68,3 +78,94 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
return gic_fd;
}
/* Should only work for level-sensitive interrupts. */
int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
{
uint64_t attr = 32 * (intid / 32);
uint64_t index = intid % 32;
uint64_t val;
int ret;
ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
attr, &val, false);
if (ret != 0)
return ret;
val |= 1U << index;
ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
attr, &val, true);
return ret;
}
void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
{
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
TEST_ASSERT(ret == 0, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO failed, "
"rc: %i errno: %i", ret, errno);
}
int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
{
uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK;
TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself "
"doesn't allow injecting SGIs. There's no mask for it.");
if (INTID_IS_PPI(intid))
irq |= KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT;
else
irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT;
return _kvm_irq_line(vm, irq, level);
}
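/*
 * For example, for PPI intid 27 the encoding above yields
 * irq = (KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT) | 27; the vcpu
 * index field is left at 0, so KVM decodes this as "PPI 27 on vcpu 0".
 */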
void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
{
int ret = _kvm_arm_irq_line(vm, intid, level);
TEST_ASSERT(ret == 0, "KVM_IRQ_LINE failed, rc: %i errno: %i",
ret, errno);
}
static void vgic_poke_irq(int gic_fd, uint32_t intid,
uint32_t vcpu, uint64_t reg_off)
{
uint64_t reg = intid / 32;
uint64_t index = intid % 32;
uint64_t attr = reg_off + reg * 4;
uint64_t val;
bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);
/* Check that the addr part of the attr is within 32 bits. */
assert(attr <= KVM_DEV_ARM_VGIC_OFFSET_MASK);
uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
: KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
if (intid_is_private) {
/* TODO: only vcpu 0 implemented for now. */
assert(vcpu == 0);
attr += SZ_64K;
}
/* All calls will succeed, even with invalid intids, as long as the
* addr part of the attr is within 32 bits (checked above). An invalid
* intid will just make the read/writes point above the intended
* register space (i.e., ICPENDR after ISPENDR).
*/
kvm_device_access(gic_fd, group, attr, &val, false);
val |= 1ULL << index;
kvm_device_access(gic_fd, group, attr, &val, true);
}
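/*
 * Example of the attr math above: intid = 42 with reg_off = GICD_ISPENDR
 * (0x200) gives reg = 1 and attr = 0x204 in the DIST_REGS group, and bit
 * 42 % 32 = 10 is set in the word that is read, modified, and written back.
 */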
void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
}
void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
}
@@ -2108,6 +2108,78 @@ int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
return ret;
}
/*
* IRQ related functions.
*/
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
struct kvm_irq_level irq_level = {
.irq = irq,
.level = level,
};
return _vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
int ret = _kvm_irq_line(vm, irq, level);
TEST_ASSERT(ret >= 0, "KVM_IRQ_LINE failed, rc: %i errno: %i", ret, errno);
}
struct kvm_irq_routing *kvm_gsi_routing_create(void)
{
struct kvm_irq_routing *routing;
size_t size;
size = sizeof(struct kvm_irq_routing);
/* Allocate space for the max number of entries: this wastes 196 KBs. */
size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
routing = calloc(1, size);
assert(routing);
return routing;
}
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
uint32_t gsi, uint32_t pin)
{
int i;
assert(routing);
assert(routing->nr < KVM_MAX_IRQ_ROUTES);
i = routing->nr;
routing->entries[i].gsi = gsi;
routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
routing->entries[i].flags = 0;
routing->entries[i].u.irqchip.irqchip = 0;
routing->entries[i].u.irqchip.pin = pin;
routing->nr++;
}
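/*
 * Callers map intids to GSIs/pins themselves. For example, the aarch64
 * vgic_irq test adds SPI intid i as
 * kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI), since the
 * irqchip pin space starts counting at the first SPI.
 */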
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
int ret;
assert(routing);
ret = ioctl(vm_get_fd(vm), KVM_SET_GSI_ROUTING, routing);
free(routing);
return ret;
}
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
int ret;
ret = _kvm_gsi_routing_write(vm, routing);
TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING failed, rc: %i errno: %i",
ret, errno);
}
/*
* VM Dump
*
......