Commit f074158a authored by Paolo Bonzini

Merge tag 'kvm-riscv-6.9-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 6.9

- Exception and interrupt handling for selftests
- Sstc (aka arch_timer) selftest
- Forward seed CSR access to KVM userspace
- Ztso extension support for Guest/VM
- Zacas extension support for Guest/VM
parents 961e2bfc d8c08313
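To make the selftest changes easier to follow, here is a minimal sketch of how the new RISC-V exception/interrupt plumbing introduced by this series (handlers.S, the vector-table helpers, and guest_get_vcpuid()) fits together. It is illustrative only: it mirrors the riscv arch_timer test added below, assumes the existing KVM selftest library helpers (vm_create_with_one_vcpu(), ucalls, and the Sstc helpers from the new riscv arch_timer.h), uses an arbitrary 10 ms period and 100 us margin, and leaves out error handling.

#include "kvm_util.h"
#include "processor.h"
#include "arch_timer.h"

static void guest_irq_handler(struct ex_regs *regs)
{
	/* route_exception() funnels every IRQ into this one handler. */
	unsigned int intid = regs->cause & ~CAUSE_IRQ_FLAG;

	GUEST_ASSERT_EQ(intid, IRQ_S_TIMER);
	timer_irq_disable();
}

static void guest_code(void)
{
	local_irq_enable();

	timer_set_next_cmp_ms(10);
	timer_irq_enable();

	/* Busy-wait one timer period plus a small margin for the IRQ. */
	udelay(msecs_to_usecs(10) + 100);

	GUEST_DONE();
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vm_init_vector_tables(vm);	/* allocate the guest handler table */
	vcpu_init_vector_tables(vcpu);	/* point stvec at exception_vectors */
	vm_install_interrupt_handler(vm, guest_irq_handler);

	/* The cycle helpers in the riscv arch_timer.h need the timer frequency. */
	vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency), &timer_freq);
	sync_global_to_guest(vm, timer_freq);

	vcpu_run(vcpu);
	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);

	kvm_vm_free(vm);
	return 0;
}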
@@ -165,6 +165,8 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZVFH,
KVM_RISCV_ISA_EXT_ZVFHMIN,
KVM_RISCV_ISA_EXT_ZFA,
KVM_RISCV_ISA_EXT_ZTSO,
KVM_RISCV_ISA_EXT_ZACAS,
KVM_RISCV_ISA_EXT_MAX,
};
...
@@ -7,6 +7,8 @@
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#define INSN_OPCODE_MASK 0x007c
#define INSN_OPCODE_SHIFT 2
#define INSN_OPCODE_SYSTEM 28
@@ -213,9 +215,20 @@ struct csr_func {
unsigned long wr_mask);
};
static int seed_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
{
if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR))
return KVM_INSN_ILLEGAL_TRAP;
return KVM_INSN_EXIT_TO_USER_SPACE;
}
static const struct csr_func csr_funcs[] = {
KVM_RISCV_VCPU_AIA_CSR_FUNCS
KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
{ .base = CSR_SEED, .count = 1, .func = seed_csr_rmw },
};
/**
...
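Returning KVM_INSN_EXIT_TO_USER_SPACE above means a guest access to the seed CSR is completed by the VMM rather than in-kernel. As a rough, illustrative sketch (not part of this merge): the exit surfaces as KVM_EXIT_RISCV_CSR in struct kvm_run, and userspace fills in the value the guest's CSR read will observe. Field names follow the existing riscv_csr exit ABI; the entropy source and the ES16 encoding below are assumptions about how a VMM might respond.

#include <stdint.h>
#include <sys/random.h>
#include <linux/kvm.h>

/* Hypothetical VMM-side handler, invoked when KVM_RUN returns with
 * run->exit_reason == KVM_EXIT_RISCV_CSR for the seed CSR. */
static void handle_seed_csr_exit(struct kvm_run *run)
{
	uint16_t entropy = 0;

	getrandom(&entropy, sizeof(entropy), 0);

	/* Zkr seed format: OPST (bits 31:30) = ES16 (0b10), entropy in bits 15:0.
	 * ret_value is what the guest's CSR read returns. */
	run->riscv_csr.ret_value = (2UL << 30) | entropy;
}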
@@ -40,6 +40,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZACAS),
KVM_ISA_EXT_ARR(ZBA),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZBC),
@@ -66,6 +67,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(ZKSED),
KVM_ISA_EXT_ARR(ZKSH),
KVM_ISA_EXT_ARR(ZKT),
KVM_ISA_EXT_ARR(ZTSO),
KVM_ISA_EXT_ARR(ZVBB),
KVM_ISA_EXT_ARR(ZVBC),
KVM_ISA_EXT_ARR(ZVFH),
@@ -117,6 +119,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_RISCV_ISA_EXT_ZACAS:
case KVM_RISCV_ISA_EXT_ZBA:
case KVM_RISCV_ISA_EXT_ZBB:
case KVM_RISCV_ISA_EXT_ZBC:
@@ -141,6 +144,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_ZKSED:
case KVM_RISCV_ISA_EXT_ZKSH:
case KVM_RISCV_ISA_EXT_ZKT:
case KVM_RISCV_ISA_EXT_ZTSO:
case KVM_RISCV_ISA_EXT_ZVBB:
case KVM_RISCV_ISA_EXT_ZVBC:
case KVM_RISCV_ISA_EXT_ZVFH:
...
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
#ifndef __ASSEMBLY__
#include <asm-generic/barrier.h>
static inline void cpu_relax(void)
{
#ifdef __riscv_muldiv
int dummy;
/* In lieu of a halt instruction, induce a long-latency stall. */
__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
/*
* Reduce instruction retirement.
* This assumes the PC changes.
*/
__asm__ __volatile__ ("pause");
#else
/* Encoding of the pause instruction */
__asm__ __volatile__ (".4byte 0x100000F");
#endif
barrier();
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_VDSO_PROCESSOR_H */
@@ -53,6 +53,7 @@ LIBKVM_s390x += lib/s390x/diag318_test_handler.c
LIBKVM_s390x += lib/s390x/processor.c
LIBKVM_s390x += lib/s390x/ucall.c
LIBKVM_riscv += lib/riscv/handlers.S
LIBKVM_riscv += lib/riscv/processor.c
LIBKVM_riscv += lib/riscv/ucall.c
@@ -143,7 +144,6 @@ TEST_GEN_PROGS_x86_64 += system_counter_offset_test
TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test
TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs
TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
@@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
TEST_GEN_PROGS_aarch64 += arch_timer
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
@@ -184,6 +185,7 @@ TEST_GEN_PROGS_s390x += rseq_test
TEST_GEN_PROGS_s390x += set_memory_region_test
TEST_GEN_PROGS_s390x += kvm_binary_stats_test
TEST_GEN_PROGS_riscv += arch_timer
TEST_GEN_PROGS_riscv += demand_paging_test
TEST_GEN_PROGS_riscv += dirty_log_test
TEST_GEN_PROGS_riscv += get-reg-list
@@ -194,6 +196,7 @@ TEST_GEN_PROGS_riscv += kvm_page_table_test
TEST_GEN_PROGS_riscv += set_memory_region_test
TEST_GEN_PROGS_riscv += steal_time
SPLIT_TESTS += arch_timer
SPLIT_TESTS += get-reg-list
TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR))
@@ -217,7 +220,7 @@ else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \
-fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
-fno-builtin-strnlen \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
@@ -260,32 +263,36 @@ LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
SPLIT_TEST_GEN_PROGS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS))
SPLIT_TEST_GEN_OBJ := $(patsubst %, $(OUTPUT)/$(ARCH_DIR)/%.o, $(SPLIT_TESTS))
TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS))
TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED))
TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ))
TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS))
TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TEST_GEN_OBJ))
-include $(TEST_DEP_FILES)
x := $(shell mkdir -p $(sort $(OUTPUT)/$(ARCH_DIR) $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(filter-out $(SPLIT_TEST_GEN_PROGS), $(TEST_GEN_PROGS)) \
$(TEST_GEN_PROGS_EXTENDED): %: %.o
$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $< $(LIBKVM_OBJS) $(LDLIBS) -o $@
$(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
$(SPLIT_TEST_GEN_PROGS): $(OUTPUT)/%: $(OUTPUT)/%.o $(OUTPUT)/$(ARCH_DIR)/%.o
$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $^ $(LDLIBS) -o $@
$(SPLIT_TEST_GEN_OBJ): $(OUTPUT)/$(ARCH_DIR)/%.o: $(ARCH_DIR)/%.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
EXTRA_CLEAN += $(GEN_HDRS) \
$(LIBKVM_OBJS) \
$(SPLIT_TEST_GEN_OBJ) \
$(TEST_DEP_FILES) \
$(TEST_GEN_OBJ) \
cscope.*
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c $(GEN_HDRS)
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
@@ -299,7 +306,7 @@ $(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@
x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
$(SPLIT_TEST_GEN_OBJ): $(GEN_HDRS)
$(TEST_GEN_PROGS): $(LIBKVM_OBJS)
$(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS)
$(TEST_GEN_OBJ): $(GEN_HDRS)
...
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch_timer.c - Tests the aarch64 timer IRQ functionality
*
* The test validates both the virtual and physical timer IRQs using
* CVAL and TVAL registers.
* The guest's main thread configures the timer interrupt for a stage
* and waits for it to fire, with a timeout equal to the timer period.
* It asserts that the timeout doesn't exceed the timer period.
*
* On the other hand, upon receipt of an interrupt, the guest's interrupt
* handler validates the interrupt by checking if the architectural state
* is in compliance with the specifications.
*
* The test provides command-line options to configure the timer's
* period (-p), number of vCPUs (-n), and iterations per stage (-i).
* To stress-test the timer stack even more, an option to migrate the
* vCPUs across pCPUs (-m), at a particular rate, is also provided.
*
* Copyright (c) 2021, Google LLC.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <pthread.h>
#include <linux/kvm.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <sys/sysinfo.h>
#include "kvm_util.h"
#include "processor.h"
#include "delay.h"
#include "arch_timer.h" #include "arch_timer.h"
#include "delay.h"
#include "gic.h" #include "gic.h"
#include "processor.h"
#include "timer_test.h"
#include "vgic.h" #include "vgic.h"
#define NR_VCPUS_DEF 4
#define NR_TEST_ITERS_DEF 5
#define TIMER_TEST_PERIOD_MS_DEF 10
#define TIMER_TEST_ERR_MARGIN_US 100
#define TIMER_TEST_MIGRATION_FREQ_MS 2
struct test_args {
int nr_vcpus;
int nr_iter;
int timer_period_ms;
int migration_freq_ms;
struct kvm_arm_counter_offset offset;
};
static struct test_args test_args = {
.nr_vcpus = NR_VCPUS_DEF,
.nr_iter = NR_TEST_ITERS_DEF,
.timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
.migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
.offset = { .reserved = 1 },
};
#define msecs_to_usecs(msec) ((msec) * 1000LL)
#define GICD_BASE_GPA 0x8000000ULL
#define GICR_BASE_GPA 0x80A0000ULL
@@ -70,22 +25,8 @@ enum guest_stage {
GUEST_STAGE_MAX,
};
/* Shared variables between host and guest */
struct test_vcpu_shared_data {
int nr_iter;
enum guest_stage guest_stage;
uint64_t xcnt;
};
static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
static pthread_t pt_vcpu_run[KVM_MAX_VCPUS];
static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];
static int vtimer_irq, ptimer_irq;
static unsigned long *vcpu_done_map;
static pthread_mutex_t vcpu_done_map_lock;
static void
guest_configure_timer_action(struct test_vcpu_shared_data *shared_data)
{
@@ -190,10 +131,14 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
/* Setup a timeout for the interrupt to arrive */
udelay(msecs_to_usecs(test_args.timer_period_ms) +
test_args.timer_err_margin_us);
irq_iter = READ_ONCE(shared_data->nr_iter);
__GUEST_ASSERT(config_iter + 1 == irq_iter,
"config_iter + 1 = 0x%lx, irq_iter = 0x%lx.\n"
" Guest timer interrupt was not triggered within the specified\n"
" interval, try to increase the error margin by [-e] option.\n",
config_iter + 1, irq_iter);
}
}
@@ -222,137 +167,6 @@ static void guest_code(void)
GUEST_DONE();
}
static void *test_vcpu_run(void *arg)
{
unsigned int vcpu_idx = (unsigned long)arg;
struct ucall uc;
struct kvm_vcpu *vcpu = vcpus[vcpu_idx];
struct kvm_vm *vm = vcpu->vm;
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];
vcpu_run(vcpu);
/* Currently, any exit from guest is an indication of completion */
pthread_mutex_lock(&vcpu_done_map_lock);
__set_bit(vcpu_idx, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
case UCALL_DONE:
break;
case UCALL_ABORT:
sync_global_from_guest(vm, *shared_data);
fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n",
vcpu_idx, shared_data->guest_stage, shared_data->nr_iter);
REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Unexpected guest exit");
}
return NULL;
}
static uint32_t test_get_pcpu(void)
{
uint32_t pcpu;
unsigned int nproc_conf;
cpu_set_t online_cpuset;
nproc_conf = get_nprocs_conf();
sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset);
/* Randomly find an available pCPU to place a vCPU on */
do {
pcpu = rand() % nproc_conf;
} while (!CPU_ISSET(pcpu, &online_cpuset));
return pcpu;
}
static int test_migrate_vcpu(unsigned int vcpu_idx)
{
int ret;
cpu_set_t cpuset;
uint32_t new_pcpu = test_get_pcpu();
CPU_ZERO(&cpuset);
CPU_SET(new_pcpu, &cpuset);
pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx],
sizeof(cpuset), &cpuset);
/* Allow the error where the vCPU thread is already finished */
TEST_ASSERT(ret == 0 || ret == ESRCH,
"Failed to migrate the vCPU:%u to pCPU: %u; ret: %d",
vcpu_idx, new_pcpu, ret);
return ret;
}
static void *test_vcpu_migration(void *arg)
{
unsigned int i, n_done;
bool vcpu_done;
do {
usleep(msecs_to_usecs(test_args.migration_freq_ms));
for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) {
pthread_mutex_lock(&vcpu_done_map_lock);
vcpu_done = test_bit(i, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
if (vcpu_done) {
n_done++;
continue;
}
test_migrate_vcpu(i);
}
} while (test_args.nr_vcpus != n_done);
return NULL;
}
static void test_run(struct kvm_vm *vm)
{
pthread_t pt_vcpu_migration;
unsigned int i;
int ret;
pthread_mutex_init(&vcpu_done_map_lock, NULL);
vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap");
for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
(void *)(unsigned long)i);
TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i);
}
/* Spawn a thread to control the vCPU migrations */
if (test_args.migration_freq_ms) {
srand(time(NULL));
ret = pthread_create(&pt_vcpu_migration, NULL,
test_vcpu_migration, NULL);
TEST_ASSERT(!ret, "Failed to create the migration pthread");
}
for (i = 0; i < test_args.nr_vcpus; i++)
pthread_join(pt_vcpu_run[i], NULL);
if (test_args.migration_freq_ms)
pthread_join(pt_vcpu_migration, NULL);
bitmap_free(vcpu_done_map);
}
static void test_init_timer_irq(struct kvm_vm *vm)
{
/* Timer initid should be same for all the vCPUs, so query only vCPU-0 */
@@ -369,7 +183,7 @@ static void test_init_timer_irq(struct kvm_vm *vm)
static int gic_fd;
struct kvm_vm *test_vm_create(void)
{
struct kvm_vm *vm;
unsigned int i;
@@ -380,10 +194,14 @@ static struct kvm_vm *test_vm_create(void)
vm_init_descriptor_tables(vm);
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
if (!test_args.reserved) {
if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET)) {
struct kvm_arm_counter_offset offset = {
.counter_offset = test_args.counter_offset,
.reserved = 0,
};
vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &offset);
} else
TEST_FAIL("no support for global offset");
}
@@ -400,81 +218,8 @@ static struct kvm_vm *test_vm_create(void)
return vm;
}
void test_vm_cleanup(struct kvm_vm *vm)
{
close(gic_fd);
kvm_vm_free(vm);
}
static void test_print_help(char *name)
{
pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
name);
pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n",
NR_VCPUS_DEF, KVM_MAX_VCPUS);
pr_info("\t-i: Number of iterations per stage (default: %u)\n",
NR_TEST_ITERS_DEF);
pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n",
TIMER_TEST_PERIOD_MS_DEF);
pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n",
TIMER_TEST_MIGRATION_FREQ_MS);
pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n");
pr_info("\t-h: print this help screen\n");
}
static bool parse_args(int argc, char *argv[])
{
int opt;
while ((opt = getopt(argc, argv, "hn:i:p:m:o:")) != -1) {
switch (opt) {
case 'n':
test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
pr_info("Max allowed vCPUs: %u\n",
KVM_MAX_VCPUS);
goto err;
}
break;
case 'i':
test_args.nr_iter = atoi_positive("Number of iterations", optarg);
break;
case 'p':
test_args.timer_period_ms = atoi_positive("Periodicity", optarg);
break;
case 'm':
test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
break;
case 'o':
test_args.offset.counter_offset = strtol(optarg, NULL, 0);
test_args.offset.reserved = 0;
break;
case 'h':
default:
goto err;
}
}
return true;
err:
test_print_help(argv[0]);
return false;
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
if (!parse_args(argc, argv))
exit(KSFT_SKIP);
__TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
"At least two physical CPUs needed for vCPU migration");
vm = test_vm_create();
test_run(vm);
test_vm_cleanup(vm);
return 0;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch_timer.c - Tests the arch timer IRQ functionality
*
* The guest's main thread configures the timer interrupt and waits
* for it to fire, with a timeout equal to the timer period.
* It asserts that the timeout doesn't exceed the timer period plus
* a user-configurable error margin (default: 100us).
*
* On the other hand, upon receipt of an interrupt, the guest's interrupt
* handler validates the interrupt by checking if the architectural state
* is in compliance with the specifications.
*
* The test provides command-line options to configure the timer's
* period (-p), number of vCPUs (-n), iterations per stage (-i) and timer
* interrupt arrival error margin (-e). To stress-test the timer stack
* even more, an option to migrate the vCPUs across pCPUs (-m), at a
* particular rate, is also provided.
*
* Copyright (c) 2021, Google LLC.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <pthread.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <sys/sysinfo.h>
#include "timer_test.h"
struct test_args test_args = {
.nr_vcpus = NR_VCPUS_DEF,
.nr_iter = NR_TEST_ITERS_DEF,
.timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
.migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
.timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US,
.reserved = 1,
};
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];
static pthread_t pt_vcpu_run[KVM_MAX_VCPUS];
static unsigned long *vcpu_done_map;
static pthread_mutex_t vcpu_done_map_lock;
static void *test_vcpu_run(void *arg)
{
unsigned int vcpu_idx = (unsigned long)arg;
struct ucall uc;
struct kvm_vcpu *vcpu = vcpus[vcpu_idx];
struct kvm_vm *vm = vcpu->vm;
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];
vcpu_run(vcpu);
/* Currently, any exit from guest is an indication of completion */
pthread_mutex_lock(&vcpu_done_map_lock);
__set_bit(vcpu_idx, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
case UCALL_DONE:
break;
case UCALL_ABORT:
sync_global_from_guest(vm, *shared_data);
fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n",
vcpu_idx, shared_data->guest_stage, shared_data->nr_iter);
REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Unexpected guest exit");
}
pr_info("PASS(vCPU-%d).\n", vcpu_idx);
return NULL;
}
static uint32_t test_get_pcpu(void)
{
uint32_t pcpu;
unsigned int nproc_conf;
cpu_set_t online_cpuset;
nproc_conf = get_nprocs_conf();
sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset);
/* Randomly find an available pCPU to place a vCPU on */
do {
pcpu = rand() % nproc_conf;
} while (!CPU_ISSET(pcpu, &online_cpuset));
return pcpu;
}
static int test_migrate_vcpu(unsigned int vcpu_idx)
{
int ret;
cpu_set_t cpuset;
uint32_t new_pcpu = test_get_pcpu();
CPU_ZERO(&cpuset);
CPU_SET(new_pcpu, &cpuset);
pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx],
sizeof(cpuset), &cpuset);
/* Allow the error where the vCPU thread is already finished */
TEST_ASSERT(ret == 0 || ret == ESRCH,
"Failed to migrate the vCPU:%u to pCPU: %u; ret: %d",
vcpu_idx, new_pcpu, ret);
return ret;
}
static void *test_vcpu_migration(void *arg)
{
unsigned int i, n_done;
bool vcpu_done;
do {
usleep(msecs_to_usecs(test_args.migration_freq_ms));
for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) {
pthread_mutex_lock(&vcpu_done_map_lock);
vcpu_done = test_bit(i, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
if (vcpu_done) {
n_done++;
continue;
}
test_migrate_vcpu(i);
}
} while (test_args.nr_vcpus != n_done);
return NULL;
}
static void test_run(struct kvm_vm *vm)
{
pthread_t pt_vcpu_migration;
unsigned int i;
int ret;
pthread_mutex_init(&vcpu_done_map_lock, NULL);
vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap");
for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
(void *)(unsigned long)i);
TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i);
}
/* Spawn a thread to control the vCPU migrations */
if (test_args.migration_freq_ms) {
srand(time(NULL));
ret = pthread_create(&pt_vcpu_migration, NULL,
test_vcpu_migration, NULL);
TEST_ASSERT(!ret, "Failed to create the migration pthread");
}
for (i = 0; i < test_args.nr_vcpus; i++)
pthread_join(pt_vcpu_run[i], NULL);
if (test_args.migration_freq_ms)
pthread_join(pt_vcpu_migration, NULL);
bitmap_free(vcpu_done_map);
}
static void test_print_help(char *name)
{
pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n"
"\t\t [-m migration_freq_ms] [-o counter_offset]\n"
"\t\t [-e timer_err_margin_us]\n", name);
pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n",
NR_VCPUS_DEF, KVM_MAX_VCPUS);
pr_info("\t-i: Number of iterations per stage (default: %u)\n",
NR_TEST_ITERS_DEF);
pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n",
TIMER_TEST_PERIOD_MS_DEF);
pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n",
TIMER_TEST_MIGRATION_FREQ_MS);
pr_info("\t-o: Counter offset (in counter cycles, default: 0) [aarch64-only]\n");
pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n",
TIMER_TEST_ERR_MARGIN_US);
pr_info("\t-h: print this help screen\n");
}
static bool parse_args(int argc, char *argv[])
{
int opt;
while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:")) != -1) {
switch (opt) {
case 'n':
test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
pr_info("Max allowed vCPUs: %u\n",
KVM_MAX_VCPUS);
goto err;
}
break;
case 'i':
test_args.nr_iter = atoi_positive("Number of iterations", optarg);
break;
case 'p':
test_args.timer_period_ms = atoi_positive("Periodicity", optarg);
break;
case 'm':
test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
break;
case 'e':
test_args.timer_err_margin_us = atoi_non_negative("Error Margin", optarg);
break;
case 'o':
test_args.counter_offset = strtol(optarg, NULL, 0);
test_args.reserved = 0;
break;
case 'h':
default:
goto err;
}
}
return true;
err:
test_print_help(argv[0]);
return false;
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
if (!parse_args(argc, argv))
exit(KSFT_SKIP);
__TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
"At least two physical CPUs needed for vCPU migration");
vm = test_vm_create();
test_run(vm);
test_vm_cleanup(vm);
return 0;
}
@@ -226,8 +226,4 @@ void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
uint64_t arg6, struct arm_smccc_res *res);
uint32_t guest_get_vcpuid(void);
#endif /* SELFTEST_KVM_PROCESSOR_H */
@@ -1081,4 +1081,6 @@ void kvm_selftest_arch_init(void);
void kvm_arch_vm_post_create(struct kvm_vm *vm);
uint32_t guest_get_vcpuid(void);
#endif /* SELFTEST_KVM_UTIL_BASE_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* RISC-V Arch Timer(sstc) specific interface
*
* Copyright (c) 2024 Intel Corporation
*/
#ifndef SELFTEST_KVM_ARCH_TIMER_H
#define SELFTEST_KVM_ARCH_TIMER_H
#include <asm/csr.h>
#include <asm/vdso/processor.h>
static unsigned long timer_freq;
#define msec_to_cycles(msec) \
((timer_freq) * (uint64_t)(msec) / 1000)
#define usec_to_cycles(usec) \
((timer_freq) * (uint64_t)(usec) / 1000000)
#define cycles_to_usec(cycles) \
((uint64_t)(cycles) * 1000000 / (timer_freq))
static inline uint64_t timer_get_cycles(void)
{
return csr_read(CSR_TIME);
}
static inline void timer_set_cmp(uint64_t cval)
{
csr_write(CSR_STIMECMP, cval);
}
static inline uint64_t timer_get_cmp(void)
{
return csr_read(CSR_STIMECMP);
}
static inline void timer_irq_enable(void)
{
csr_set(CSR_SIE, IE_TIE);
}
static inline void timer_irq_disable(void)
{
csr_clear(CSR_SIE, IE_TIE);
}
static inline void timer_set_next_cmp_ms(uint32_t msec)
{
uint64_t now_ct = timer_get_cycles();
uint64_t next_ct = now_ct + msec_to_cycles(msec);
timer_set_cmp(next_ct);
}
static inline void __delay(uint64_t cycles)
{
uint64_t start = timer_get_cycles();
while ((timer_get_cycles() - start) < cycles)
cpu_relax();
}
static inline void udelay(unsigned long usec)
{
__delay(usec_to_cycles(usec));
}
#endif /* SELFTEST_KVM_ARCH_TIMER_H */
@@ -7,8 +7,9 @@
#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H
#include "kvm_util.h"
#include <linux/stringify.h>
#include <asm/csr.h>
#include "kvm_util.h"
static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
uint64_t idx, uint64_t size)
@@ -47,6 +48,58 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
KVM_REG_RISCV_SBI_SINGLE, \
idx, KVM_REG_SIZE_ULONG)
bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext);
struct ex_regs {
unsigned long ra;
unsigned long sp;
unsigned long gp;
unsigned long tp;
unsigned long t0;
unsigned long t1;
unsigned long t2;
unsigned long s0;
unsigned long s1;
unsigned long a0;
unsigned long a1;
unsigned long a2;
unsigned long a3;
unsigned long a4;
unsigned long a5;
unsigned long a6;
unsigned long a7;
unsigned long s2;
unsigned long s3;
unsigned long s4;
unsigned long s5;
unsigned long s6;
unsigned long s7;
unsigned long s8;
unsigned long s9;
unsigned long s10;
unsigned long s11;
unsigned long t3;
unsigned long t4;
unsigned long t5;
unsigned long t6;
unsigned long epc;
unsigned long status;
unsigned long cause;
};
#define NR_VECTORS 2
#define NR_EXCEPTIONS 32
#define EC_MASK (NR_EXCEPTIONS - 1)
typedef void(*exception_handler_fn)(struct ex_regs *);
void vm_init_vector_tables(struct kvm_vm *vm);
void vcpu_init_vector_tables(struct kvm_vcpu *vcpu);
void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler);
void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler);
/* L3 index Bit[47:39] */
#define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL
#define PGTBL_L3_INDEX_SHIFT 39
@@ -101,13 +154,6 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
#define PGTBL_PAGE_SIZE PGTBL_L0_BLOCK_SIZE
#define PGTBL_PAGE_SIZE_SHIFT PGTBL_L0_BLOCK_SHIFT
#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
#define SATP_MODE_39 _AC(0x8000000000000000, UL)
#define SATP_MODE_48 _AC(0x9000000000000000, UL)
#define SATP_ASID_BITS 16
#define SATP_ASID_SHIFT 44
#define SATP_ASID_MASK _AC(0xFFFF, UL)
/* SBI return error codes */
#define SBI_SUCCESS 0
#define SBI_ERR_FAILURE -1
@@ -147,4 +193,14 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
bool guest_sbi_probe_extension(int extid, long *out_val);
static inline void local_irq_enable(void)
{
csr_set(CSR_SSTATUS, SR_SIE);
}
static inline void local_irq_disable(void)
{
csr_clear(CSR_SSTATUS, SR_SIE);
}
#endif /* SELFTEST_KVM_PROCESSOR_H */
@@ -20,6 +20,8 @@
#include <sys/mman.h>
#include "kselftest.h"
#define msecs_to_usecs(msec) ((msec) * 1000ULL)
static inline int _no_printf(const char *format, ...) { return 0; }
#ifdef DEBUG
...
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* timer test specific header
*
* Copyright (C) 2018, Google LLC
*/
#ifndef SELFTEST_KVM_TIMER_TEST_H
#define SELFTEST_KVM_TIMER_TEST_H
#include "kvm_util.h"
#define NR_VCPUS_DEF 4
#define NR_TEST_ITERS_DEF 5
#define TIMER_TEST_PERIOD_MS_DEF 10
#define TIMER_TEST_ERR_MARGIN_US 100
#define TIMER_TEST_MIGRATION_FREQ_MS 2
/* Timer test cmdline parameters */
struct test_args {
uint32_t nr_vcpus;
uint32_t nr_iter;
uint32_t timer_period_ms;
uint32_t migration_freq_ms;
uint32_t timer_err_margin_us;
/* Members of struct kvm_arm_counter_offset */
uint64_t counter_offset;
uint64_t reserved;
};
/* Shared variables between host and guest */
struct test_vcpu_shared_data {
uint32_t nr_iter;
int guest_stage;
uint64_t xcnt;
};
extern struct test_args test_args;
extern struct kvm_vcpu *vcpus[];
extern struct test_vcpu_shared_data vcpu_shared_data[];
struct kvm_vm *test_vm_create(void);
void test_vm_cleanup(struct kvm_vm *vm);
#endif /* SELFTEST_KVM_TIMER_TEST_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2023 Intel Corporation
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <asm/csr.h>
.macro save_context
addi sp, sp, (-8*34)
sd x1, 0(sp)
sd x2, 8(sp)
sd x3, 16(sp)
sd x4, 24(sp)
sd x5, 32(sp)
sd x6, 40(sp)
sd x7, 48(sp)
sd x8, 56(sp)
sd x9, 64(sp)
sd x10, 72(sp)
sd x11, 80(sp)
sd x12, 88(sp)
sd x13, 96(sp)
sd x14, 104(sp)
sd x15, 112(sp)
sd x16, 120(sp)
sd x17, 128(sp)
sd x18, 136(sp)
sd x19, 144(sp)
sd x20, 152(sp)
sd x21, 160(sp)
sd x22, 168(sp)
sd x23, 176(sp)
sd x24, 184(sp)
sd x25, 192(sp)
sd x26, 200(sp)
sd x27, 208(sp)
sd x28, 216(sp)
sd x29, 224(sp)
sd x30, 232(sp)
sd x31, 240(sp)
csrr s0, CSR_SEPC
csrr s1, CSR_SSTATUS
csrr s2, CSR_SCAUSE
sd s0, 248(sp)
sd s1, 256(sp)
sd s2, 264(sp)
.endm
.macro restore_context
ld s2, 264(sp)
ld s1, 256(sp)
ld s0, 248(sp)
csrw CSR_SCAUSE, s2
csrw CSR_SSTATUS, s1
csrw CSR_SEPC, s0
ld x31, 240(sp)
ld x30, 232(sp)
ld x29, 224(sp)
ld x28, 216(sp)
ld x27, 208(sp)
ld x26, 200(sp)
ld x25, 192(sp)
ld x24, 184(sp)
ld x23, 176(sp)
ld x22, 168(sp)
ld x21, 160(sp)
ld x20, 152(sp)
ld x19, 144(sp)
ld x18, 136(sp)
ld x17, 128(sp)
ld x16, 120(sp)
ld x15, 112(sp)
ld x14, 104(sp)
ld x13, 96(sp)
ld x12, 88(sp)
ld x11, 80(sp)
ld x10, 72(sp)
ld x9, 64(sp)
ld x8, 56(sp)
ld x7, 48(sp)
ld x6, 40(sp)
ld x5, 32(sp)
ld x4, 24(sp)
ld x3, 16(sp)
ld x2, 8(sp)
ld x1, 0(sp)
addi sp, sp, (8*34)
.endm
.balign 4
.global exception_vectors
exception_vectors:
save_context
move a0, sp
call route_exception
restore_context
sret
@@ -13,6 +13,18 @@
#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000
static vm_vaddr_t exception_handlers;
bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext)
{
unsigned long value = 0;
int ret;
ret = __vcpu_get_reg(vcpu, ext, &value);
return !ret && !!value;
}
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
return (v + vm->page_size) & ~(vm->page_size - 1);
@@ -314,6 +326,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
/* Setup sscratch for guest_get_vcpuid() */
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id);
/* Setup default exception vector of guest */
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
@@ -364,8 +379,80 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
va_end(ap);
}
void kvm_exit_unexpected_exception(int vector, int ec)
{
ucall(UCALL_UNHANDLED, 2, vector, ec);
}
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) {
TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
uc.args[0], uc.args[1]);
}
}
struct handlers {
exception_handler_fn exception_handlers[NR_VECTORS][NR_EXCEPTIONS];
};
void route_exception(struct ex_regs *regs)
{
struct handlers *handlers = (struct handlers *)exception_handlers;
int vector = 0, ec;
ec = regs->cause & ~CAUSE_IRQ_FLAG;
if (ec >= NR_EXCEPTIONS)
goto unexpected_exception;
/* Use the same handler for all the interrupts */
if (regs->cause & CAUSE_IRQ_FLAG) {
vector = 1;
ec = 0;
}
if (handlers && handlers->exception_handlers[vector][ec])
return handlers->exception_handlers[vector][ec](regs);
unexpected_exception:
return kvm_exit_unexpected_exception(vector, ec);
}
void vcpu_init_vector_tables(struct kvm_vcpu *vcpu)
{
extern char exception_vectors;
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)&exception_vectors);
}
void vm_init_vector_tables(struct kvm_vm *vm)
{
vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
vm->page_size, MEM_REGION_DATA);
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}
void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)
{
struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
assert(vector < NR_EXCEPTIONS);
handlers->exception_handlers[0][vector] = handler;
}
void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler)
{
struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
handlers->exception_handlers[1][0] = handler;
}
uint32_t guest_get_vcpuid(void)
{
return csr_read(CSR_SSCRATCH);
}
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
...
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch_timer.c - Tests the riscv64 sstc timer IRQ functionality
*
* The test validates the sstc timer IRQs using vstimecmp registers.
* It's ported from the aarch64 arch_timer test.
*
* Copyright (c) 2024, Intel Corporation.
*/
#define _GNU_SOURCE
#include "arch_timer.h"
#include "kvm_util.h"
#include "processor.h"
#include "timer_test.h"
static int timer_irq = IRQ_S_TIMER;
static void guest_irq_handler(struct ex_regs *regs)
{
uint64_t xcnt, xcnt_diff_us, cmp;
unsigned int intid = regs->cause & ~CAUSE_IRQ_FLAG;
uint32_t cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
timer_irq_disable();
xcnt = timer_get_cycles();
cmp = timer_get_cmp();
xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
/* Make sure we are dealing with the correct timer IRQ */
GUEST_ASSERT_EQ(intid, timer_irq);
__GUEST_ASSERT(xcnt >= cmp,
"xcnt = 0x%"PRIx64", cmp = 0x%"PRIx64", xcnt_diff_us = 0x%" PRIx64,
xcnt, cmp, xcnt_diff_us);
WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
}
static void guest_run(struct test_vcpu_shared_data *shared_data)
{
uint32_t irq_iter, config_iter;
shared_data->nr_iter = 0;
shared_data->guest_stage = 0;
for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
/* Setup the next interrupt */
timer_set_next_cmp_ms(test_args.timer_period_ms);
shared_data->xcnt = timer_get_cycles();
timer_irq_enable();
/* Setup a timeout for the interrupt to arrive */
udelay(msecs_to_usecs(test_args.timer_period_ms) +
test_args.timer_err_margin_us);
irq_iter = READ_ONCE(shared_data->nr_iter);
__GUEST_ASSERT(config_iter + 1 == irq_iter,
"config_iter + 1 = 0x%x, irq_iter = 0x%x.\n"
" Guest timer interrupt was not trigged within the specified\n"
" interval, try to increase the error margin by [-e] option.\n",
config_iter + 1, irq_iter);
}
}
static void guest_code(void)
{
uint32_t cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
timer_irq_disable();
local_irq_enable();
guest_run(shared_data);
GUEST_DONE();
}
struct kvm_vm *test_vm_create(void)
{
struct kvm_vm *vm;
int nr_vcpus = test_args.nr_vcpus;
vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
__TEST_REQUIRE(__vcpu_has_ext(vcpus[0], RISCV_ISA_EXT_REG(KVM_RISCV_ISA_EXT_SSTC)),
"SSTC not available, skipping test\n");
vm_init_vector_tables(vm);
vm_install_interrupt_handler(vm, guest_irq_handler);
for (int i = 0; i < nr_vcpus; i++)
vcpu_init_vector_tables(vcpus[i]);
/* Initialize guest timer frequency. */
vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency), &timer_freq);
sync_global_to_guest(vm, timer_freq);
pr_debug("timer_freq: %lu\n", timer_freq);
/* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args);
return vm;
}
void test_vm_cleanup(struct kvm_vm *vm)
{
kvm_vm_free(vm);
}
@@ -47,6 +47,7 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZACAS:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBC:
@@ -73,6 +74,7 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSED:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSH:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKT:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZTSO:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBB:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBC:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFH:
@@ -123,15 +125,6 @@ bool check_reject_set(int err)
return err == EINVAL;
}
static bool vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext_id)
{
int ret;
unsigned long value;
ret = __vcpu_get_reg(vcpu, ext_id, &value);
return (ret) ? false : !!value;
}
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
@@ -176,7 +169,7 @@ void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
__vcpu_set_reg(vcpu, feature, 1);
/* Double check whether the desired extension was enabled */
__TEST_REQUIRE(__vcpu_has_ext(vcpu, feature),
"%s not available, skipping tests", s->name);
}
}
@@ -419,6 +412,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZACAS),
KVM_ISA_EXT_ARR(ZBA),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZBC),
@@ -445,6 +439,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(ZKSED),
KVM_ISA_EXT_ARR(ZKSH),
KVM_ISA_EXT_ARR(ZKT),
KVM_ISA_EXT_ARR(ZTSO),
KVM_ISA_EXT_ARR(ZVBB),
KVM_ISA_EXT_ARR(ZVBC),
KVM_ISA_EXT_ARR(ZVFH),
@@ -940,6 +935,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(sstc, SSTC);
KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL);
KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT);
KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT);
KVM_ISA_EXT_SIMPLE_CONFIG(zacas, ZACAS);
KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA);
KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB);
KVM_ISA_EXT_SIMPLE_CONFIG(zbc, ZBC);
@@ -966,6 +962,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zkr, ZKR);
KVM_ISA_EXT_SIMPLE_CONFIG(zksed, ZKSED);
KVM_ISA_EXT_SIMPLE_CONFIG(zksh, ZKSH);
KVM_ISA_EXT_SIMPLE_CONFIG(zkt, ZKT);
KVM_ISA_EXT_SIMPLE_CONFIG(ztso, ZTSO);
KVM_ISA_EXT_SIMPLE_CONFIG(zvbb, ZVBB);
KVM_ISA_EXT_SIMPLE_CONFIG(zvbc, ZVBC);
KVM_ISA_EXT_SIMPLE_CONFIG(zvfh, ZVFH);
@@ -993,6 +990,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_svinval,
&config_svnapot,
&config_svpbmt,
&config_zacas,
&config_zba,
&config_zbb,
&config_zbc,
@@ -1019,6 +1017,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_zksed,
&config_zksh,
&config_zkt,
&config_ztso,
&config_zvbb,
&config_zvbc,
&config_zvfh,
...