Commit 7ed397d1 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Add TEST_REQUIRE macros to reduce skipping copy+paste

Add TEST_REQUIRE() and __TEST_REQUIRE() to replace the myriad open coded
instances of selftests exiting with KSFT_SKIP after printing an
informational message.  In addition to reducing the amount of boilerplate
code in selftests, the UPPERCASE macro names make it easier to visually
identify a test's requirements.

Convert usage that erroneously uses something other than print_skip()
and/or "exits" with '0' or some other non-KSFT_SKIP value.

Intentionally drop a kvm_vm_free() in aarch64/debug-exceptions.c as part
of the conversion.  All memory and file descriptors are freed on process
exit, so the explicit free is superfluous.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3ea9b809
...@@ -375,10 +375,7 @@ static struct kvm_vm *test_vm_create(void) ...@@ -375,10 +375,7 @@ static struct kvm_vm *test_vm_create(void)
ucall_init(vm, NULL); ucall_init(vm, NULL);
test_init_timer_irq(vm); test_init_timer_irq(vm);
gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA); gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
if (gic_fd < 0) { __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");
print_skip("Failed to create vgic-v3");
exit(KSFT_SKIP);
}
/* Make all the test's cmdline args visible to the guest */ /* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args); sync_global_to_guest(vm, test_args);
...@@ -468,10 +465,8 @@ int main(int argc, char *argv[]) ...@@ -468,10 +465,8 @@ int main(int argc, char *argv[])
if (!parse_args(argc, argv)) if (!parse_args(argc, argv))
exit(KSFT_SKIP); exit(KSFT_SKIP);
if (test_args.migration_freq_ms && get_nprocs() < 2) { __TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
print_skip("At least two physical CPUs needed for vCPU migration"); "At least two physical CPUs needed for vCPU migration");
exit(KSFT_SKIP);
}
vm = test_vm_create(); vm = test_vm_create();
test_run(vm); test_run(vm);
......
...@@ -259,11 +259,8 @@ int main(int argc, char *argv[]) ...@@ -259,11 +259,8 @@ int main(int argc, char *argv[])
vm_init_descriptor_tables(vm); vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu); vcpu_init_descriptor_tables(vcpu);
if (debug_version(vcpu) < 6) { __TEST_REQUIRE(debug_version(vcpu) >= 6,
print_skip("Armv8 debug architecture not supported."); "Armv8 debug architecture not supported.");
kvm_vm_free(vm);
exit(KSFT_SKIP);
}
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_BRK_INS, guest_sw_bp_handler); ESR_EC_BRK_INS, guest_sw_bp_handler);
......
...@@ -395,10 +395,12 @@ static void check_supported(struct vcpu_config *c) ...@@ -395,10 +395,12 @@ static void check_supported(struct vcpu_config *c)
struct reg_sublist *s; struct reg_sublist *s;
for_each_sublist(c, s) { for_each_sublist(c, s) {
if (s->capability && !kvm_has_cap(s->capability)) { if (!s->capability)
fprintf(stderr, "%s: %s not available, skipping tests\n", config_name(c), s->name); continue;
exit(KSFT_SKIP);
} __TEST_REQUIRE(kvm_has_cap(s->capability),
"%s: %s not available, skipping tests\n",
config_name(c), s->name);
} }
} }
......
...@@ -192,10 +192,7 @@ static void host_test_system_suspend(void) ...@@ -192,10 +192,7 @@ static void host_test_system_suspend(void)
int main(void) int main(void)
{ {
if (!kvm_check_cap(KVM_CAP_ARM_SYSTEM_SUSPEND)) { TEST_REQUIRE(kvm_check_cap(KVM_CAP_ARM_SYSTEM_SUSPEND));
print_skip("KVM_CAP_ARM_SYSTEM_SUSPEND not supported");
exit(KSFT_SKIP);
}
host_test_cpu_on(); host_test_cpu_on();
host_test_system_suspend(); host_test_system_suspend();
......
...@@ -82,10 +82,7 @@ int main(void) ...@@ -82,10 +82,7 @@ int main(void)
struct kvm_vm *vm; struct kvm_vm *vm;
int ret; int ret;
if (!kvm_has_cap(KVM_CAP_ARM_EL1_32BIT)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL1_32BIT));
print_skip("KVM_CAP_ARM_EL1_32BIT is not supported");
exit(KSFT_SKIP);
}
/* Get the preferred target type and copy that to init1 for later use */ /* Get the preferred target type and copy that to init1 for later use */
vm = vm_create_barebones(); vm = vm_create_barebones();
......
...@@ -703,13 +703,9 @@ int main(int ac, char **av) ...@@ -703,13 +703,9 @@ int main(int ac, char **av)
} }
ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V2); ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V2);
if (!ret) { __TEST_REQUIRE(!ret, "No GICv2 nor GICv3 support");
pr_info("Running GIC_v2 tests.\n");
run_tests(KVM_DEV_TYPE_ARM_VGIC_V2);
return 0;
}
print_skip("No GICv2 nor GICv3 support"); pr_info("Running GIC_v2 tests.\n");
exit(KSFT_SKIP); run_tests(KVM_DEV_TYPE_ARM_VGIC_V2);
return 0; return 0;
} }
...@@ -768,10 +768,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) ...@@ -768,10 +768,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
gic_fd = vgic_v3_setup(vm, 1, nr_irqs, gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
GICD_BASE_GPA, GICR_BASE_GPA); GICD_BASE_GPA, GICR_BASE_GPA);
if (gic_fd < 0) { __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");
print_skip("Failed to create vgic-v3, skipping");
exit(KSFT_SKIP);
}
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
guest_irq_handlers[args.eoi_split][args.level_sensitive]); guest_irq_handlers[args.eoi_split][args.level_sensitive]);
......
...@@ -104,10 +104,7 @@ static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva) ...@@ -104,10 +104,7 @@ static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
return 0; return 0;
pfn = entry & PAGEMAP_PFN_MASK; pfn = entry & PAGEMAP_PFN_MASK;
if (!pfn) { __TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN");
print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
exit(KSFT_SKIP);
}
return pfn; return pfn;
} }
...@@ -380,10 +377,8 @@ int main(int argc, char *argv[]) ...@@ -380,10 +377,8 @@ int main(int argc, char *argv[])
} }
page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR); page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
if (page_idle_fd < 0) { __TEST_REQUIRE(page_idle_fd >= 0,
print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled"); "CONFIG_IDLE_PAGE_TRACKING is not enabled");
exit(KSFT_SKIP);
}
close(page_idle_fd); close(page_idle_fd);
for_each_guest_mode(run_test, &params); for_each_guest_mode(run_test, &params);
......
...@@ -34,6 +34,15 @@ static inline int _no_printf(const char *format, ...) { return 0; } ...@@ -34,6 +34,15 @@ static inline int _no_printf(const char *format, ...) { return 0; }
#endif #endif
void print_skip(const char *fmt, ...) __attribute__((format(printf, 1, 2))); void print_skip(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
/*
 * __TEST_REQUIRE() - skip the test unless a requirement is met.
 *
 * If the condition @f evaluates false, print an informational message via
 * print_skip() using the printf-style @fmt and variadic arguments, then
 * terminate the process with exit code KSFT_SKIP so the kselftest harness
 * reports the test as skipped rather than failed.  The ## before
 * __VA_ARGS__ is the GNU extension that swallows the preceding comma when
 * no variadic arguments are supplied.
 */
#define __TEST_REQUIRE(f, fmt, ...)	\
do {	\
if (!(f)) {	\
print_skip(fmt, ##__VA_ARGS__);	\
exit(KSFT_SKIP);	\
}	\
} while (0)

/*
 * TEST_REQUIRE() - convenience wrapper that stringifies the condition
 * itself (via the # operator) as the skip message, e.g.
 * "Requirement not met: kvm_has_cap(KVM_CAP_FOO)".
 */
#define TEST_REQUIRE(f) __TEST_REQUIRE(f, "Requirement not met: %s", #f)
ssize_t test_write(int fd, const void *buf, size_t count); ssize_t test_write(int fd, const void *buf, size_t count);
ssize_t test_read(int fd, void *buf, size_t count); ssize_t test_read(int fd, void *buf, size_t count);
......
...@@ -213,10 +213,7 @@ int main(int argc, char *argv[]) ...@@ -213,10 +213,7 @@ int main(int argc, char *argv[])
} }
/* Check the extension for binary stats */ /* Check the extension for binary stats */
if (!kvm_has_cap(KVM_CAP_BINARY_STATS_FD)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD));
print_skip("Binary form statistics interface is not supported");
exit(KSFT_SKIP);
}
/* Create VMs and VCPUs */ /* Create VMs and VCPUs */
vms = malloc(sizeof(vms[0]) * max_vm); vms = malloc(sizeof(vms[0]) * max_vm);
......
...@@ -64,11 +64,9 @@ int main(int argc, char *argv[]) ...@@ -64,11 +64,9 @@ int main(int argc, char *argv[])
rl.rlim_max = nr_fds_wanted; rl.rlim_max = nr_fds_wanted;
int r = setrlimit(RLIMIT_NOFILE, &rl); int r = setrlimit(RLIMIT_NOFILE, &rl);
if (r < 0) { __TEST_REQUIRE(r >= 0,
printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n", "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
old_rlim_max, nr_fds_wanted); old_rlim_max, nr_fds_wanted);
exit(KSFT_SKIP);
}
} else { } else {
TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!"); TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
} }
......
...@@ -26,10 +26,7 @@ int open_path_or_exit(const char *path, int flags) ...@@ -26,10 +26,7 @@ int open_path_or_exit(const char *path, int flags)
int fd; int fd;
fd = open(path, flags); fd = open(path, flags);
if (fd < 0) { __TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);
print_skip("%s not available (errno: %d)", path, errno);
exit(KSFT_SKIP);
}
return fd; return fd;
} }
...@@ -93,10 +90,7 @@ static void vm_open(struct kvm_vm *vm) ...@@ -93,10 +90,7 @@ static void vm_open(struct kvm_vm *vm)
{ {
vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
if (!kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));
print_skip("immediate_exit not available");
exit(KSFT_SKIP);
}
vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type); vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
......
...@@ -609,14 +609,14 @@ void vm_xsave_req_perm(int bit) ...@@ -609,14 +609,14 @@ void vm_xsave_req_perm(int bit)
kvm_fd = open_kvm_dev_path_or_exit(); kvm_fd = open_kvm_dev_path_or_exit();
rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr); rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
close(kvm_fd); close(kvm_fd);
if (rc == -1 && (errno == ENXIO || errno == EINVAL)) if (rc == -1 && (errno == ENXIO || errno == EINVAL))
exit(KSFT_SKIP); exit(KSFT_SKIP);
TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc); TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
if (!(bitmask & (1ULL << bit)))
exit(KSFT_SKIP);
if (!is_xfd_supported()) TEST_REQUIRE(bitmask & (1ULL << bit));
exit(KSFT_SKIP);
TEST_REQUIRE(is_xfd_supported());
rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit); rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
......
...@@ -174,10 +174,7 @@ bool nested_svm_supported(void) ...@@ -174,10 +174,7 @@ bool nested_svm_supported(void)
void nested_svm_check_supported(void) void nested_svm_check_supported(void)
{ {
if (!nested_svm_supported()) { TEST_REQUIRE(nested_svm_supported());
print_skip("nested SVM not enabled");
exit(KSFT_SKIP);
}
} }
/* /*
......
...@@ -391,10 +391,7 @@ bool nested_vmx_supported(void) ...@@ -391,10 +391,7 @@ bool nested_vmx_supported(void)
void nested_vmx_check_supported(void) void nested_vmx_check_supported(void)
{ {
if (!nested_vmx_supported()) { TEST_REQUIRE(nested_vmx_supported());
print_skip("nested VMX not enabled");
exit(KSFT_SKIP);
}
} }
static void nested_create_pte(struct kvm_vm *vm, static void nested_create_pte(struct kvm_vm *vm,
......
...@@ -171,12 +171,11 @@ static void *migration_worker(void *ign) ...@@ -171,12 +171,11 @@ static void *migration_worker(void *ign)
return NULL; return NULL;
} }
static int calc_min_max_cpu(void) static void calc_min_max_cpu(void)
{ {
int i, cnt, nproc; int i, cnt, nproc;
if (CPU_COUNT(&possible_mask) < 2) TEST_REQUIRE(CPU_COUNT(&possible_mask) >= 2);
return -EINVAL;
/* /*
* CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
...@@ -198,7 +197,8 @@ static int calc_min_max_cpu(void) ...@@ -198,7 +197,8 @@ static int calc_min_max_cpu(void)
cnt++; cnt++;
} }
return (cnt < 2) ? -EINVAL : 0; __TEST_REQUIRE(cnt >= 2,
"Only one usable CPU, task migration not possible");
} }
int main(int argc, char *argv[]) int main(int argc, char *argv[])
...@@ -215,10 +215,7 @@ int main(int argc, char *argv[]) ...@@ -215,10 +215,7 @@ int main(int argc, char *argv[])
TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno, TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
strerror(errno)); strerror(errno));
if (calc_min_max_cpu()) { calc_min_max_cpu();
print_skip("Only one usable CPU, task migration not possible");
exit(KSFT_SKIP);
}
sys_rseq(0); sys_rseq(0);
......
...@@ -756,20 +756,17 @@ struct testdef { ...@@ -756,20 +756,17 @@ struct testdef {
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
int memop_cap, extension_cap, idx; int extension_cap, idx;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
setbuf(stdout, NULL); /* Tell stdout not to buffer its content */ setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
ksft_print_header(); ksft_print_header();
memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
if (!memop_cap) {
ksft_exit_skip("CAP_S390_MEM_OP not supported.\n");
}
ksft_set_plan(ARRAY_SIZE(testlist)); ksft_set_plan(ARRAY_SIZE(testlist));
extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) { for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
if (testlist[idx].extension >= extension_cap) { if (testlist[idx].extension >= extension_cap) {
testlist[idx].test(); testlist[idx].test();
......
...@@ -229,14 +229,13 @@ int main(int argc, char *argv[]) ...@@ -229,14 +229,13 @@ int main(int argc, char *argv[])
struct kvm_vm *vm; struct kvm_vm *vm;
int idx; int idx;
TEST_REQUIRE(kvm_check_cap(KVM_CAP_SYNC_REGS));
/* Tell stdout not to buffer its content */ /* Tell stdout not to buffer its content */
setbuf(stdout, NULL); setbuf(stdout, NULL);
ksft_print_header(); ksft_print_header();
if (!kvm_check_cap(KVM_CAP_SYNC_REGS))
ksft_exit_skip("CAP_SYNC_REGS not supported");
ksft_set_plan(ARRAY_SIZE(testlist)); ksft_set_plan(ARRAY_SIZE(testlist));
/* Create VM */ /* Create VM */
......
...@@ -271,10 +271,7 @@ int main(int ac, char **av) ...@@ -271,10 +271,7 @@ int main(int ac, char **av)
virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages); virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
ucall_init(vm, NULL); ucall_init(vm, NULL);
if (!is_steal_time_supported(vcpus[0])) { TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
print_skip("steal-time not supported");
exit(KSFT_SKIP);
}
/* Run test on each VCPU */ /* Run test on each VCPU */
for (i = 0; i < NR_VCPUS; ++i) { for (i = 0; i < NR_VCPUS; ++i) {
......
...@@ -28,11 +28,9 @@ static struct test_case test_cases[] = { ...@@ -28,11 +28,9 @@ static struct test_case test_cases[] = {
static void check_preconditions(struct kvm_vcpu *vcpu) static void check_preconditions(struct kvm_vcpu *vcpu)
{ {
if (!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET)) __TEST_REQUIRE(!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL,
return; KVM_VCPU_TSC_OFFSET),
"KVM_VCPU_TSC_OFFSET not supported; skipping test");
print_skip("KVM_VCPU_TSC_OFFSET not supported; skipping test");
exit(KSFT_SKIP);
} }
static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test) static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test)
......
...@@ -317,7 +317,6 @@ int main(int argc, char *argv[]) ...@@ -317,7 +317,6 @@ int main(int argc, char *argv[])
{ {
struct kvm_cpuid_entry2 *entry; struct kvm_cpuid_entry2 *entry;
struct kvm_regs regs1, regs2; struct kvm_regs regs1, regs2;
bool amx_supported = false;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
struct kvm_vm *vm; struct kvm_vm *vm;
struct kvm_run *run; struct kvm_run *run;
...@@ -334,21 +333,15 @@ int main(int argc, char *argv[]) ...@@ -334,21 +333,15 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
entry = kvm_get_supported_cpuid_entry(1); entry = kvm_get_supported_cpuid_entry(1);
if (!(entry->ecx & X86_FEATURE_XSAVE)) { TEST_REQUIRE(entry->ecx & X86_FEATURE_XSAVE);
print_skip("XSAVE feature not supported");
exit(KSFT_SKIP);
}
if (kvm_get_cpuid_max_basic() >= 0xd) { TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xd);
entry = kvm_get_supported_cpuid_index(0xd, 0);
amx_supported = entry && !!(entry->eax & XFEATURE_MASK_XTILE); entry = kvm_get_supported_cpuid_index(0xd, 0);
if (!amx_supported) { TEST_REQUIRE(entry->eax & XFEATURE_MASK_XTILE);
print_skip("AMX is not supported by the vCPU (eax=0x%x)", entry->eax);
exit(KSFT_SKIP); /* Get xsave/restore max size */
} xsave_restore_size = entry->ecx;
/* Get xsave/restore max size */
xsave_restore_size = entry->ecx;
}
run = vcpu->run; run = vcpu->run;
vcpu_regs_get(vcpu, &regs1); vcpu_regs_get(vcpu, &regs1);
......
...@@ -70,10 +70,7 @@ int main(int argc, char *argv[]) ...@@ -70,10 +70,7 @@ int main(int argc, char *argv[])
struct ucall uc; struct ucall uc;
entry = kvm_get_supported_cpuid_entry(1); entry = kvm_get_supported_cpuid_entry(1);
if (!(entry->ecx & X86_FEATURE_XSAVE)) { TEST_REQUIRE(entry->ecx & X86_FEATURE_XSAVE);
print_skip("XSAVE feature not supported");
return 0;
}
/* Tell stdout not to buffer its content */ /* Tell stdout not to buffer its content */
setbuf(stdout, NULL); setbuf(stdout, NULL);
......
...@@ -95,10 +95,7 @@ int main(void) ...@@ -95,10 +95,7 @@ int main(void)
1, /* cli */ 1, /* cli */
}; };
if (!kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG));
print_skip("KVM_CAP_SET_GUEST_DEBUG not supported");
return 0;
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run; run = vcpu->run;
......
...@@ -162,10 +162,7 @@ int main(int argc, char *argv[]) ...@@ -162,10 +162,7 @@ int main(int argc, char *argv[])
/* Tell stdout not to buffer its content */ /* Tell stdout not to buffer its content */
setbuf(stdout, NULL); setbuf(stdout, NULL);
if (!kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));
printf("module parameter 'allow_smaller_maxphyaddr' is not set. Skipping test.\n");
return 0;
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
......
...@@ -208,12 +208,9 @@ int main(int argc, char *argv[]) ...@@ -208,12 +208,9 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
if (!nested_vmx_supported() || TEST_REQUIRE(nested_vmx_supported());
!kvm_has_cap(KVM_CAP_NESTED_STATE) || TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
!kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
print_skip("Enlightened VMCS is unsupported");
exit(KSFT_SKIP);
}
vcpu_set_hv_cpuid(vcpu); vcpu_set_hv_cpuid(vcpu);
vcpu_enable_evmcs(vcpu); vcpu_enable_evmcs(vcpu);
......
...@@ -156,10 +156,7 @@ static void test_fix_hypercall_disabled(void) ...@@ -156,10 +156,7 @@ static void test_fix_hypercall_disabled(void)
int main(void) int main(void)
{ {
if (!(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
print_skip("KVM_X86_QUIRK_HYPERCALL_INSN not supported");
exit(KSFT_SKIP);
}
test_fix_hypercall(); test_fix_hypercall();
test_fix_hypercall_disabled(); test_fix_hypercall_disabled();
......
...@@ -25,10 +25,7 @@ int main(int argc, char *argv[]) ...@@ -25,10 +25,7 @@ int main(int argc, char *argv[])
* will cover the "regular" list of MSRs, the coverage here is purely * will cover the "regular" list of MSRs, the coverage here is purely
* opportunistic and not interesting on its own. * opportunistic and not interesting on its own.
*/ */
if (!kvm_check_cap(KVM_CAP_GET_MSR_FEATURES)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_GET_MSR_FEATURES));
print_skip("KVM_CAP_GET_MSR_FEATURES not supported");
exit(KSFT_SKIP);
}
(void)kvm_get_msr_index_list(); (void)kvm_get_msr_index_list();
......
...@@ -137,10 +137,7 @@ int main(int argc, char *argv[]) ...@@ -137,10 +137,7 @@ int main(int argc, char *argv[])
/* Tell stdout not to buffer its content */ /* Tell stdout not to buffer its content */
setbuf(stdout, NULL); setbuf(stdout, NULL);
if (!kvm_has_cap(KVM_CAP_HYPERV_CPUID)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID));
print_skip("KVM_CAP_HYPERV_CPUID not supported");
exit(KSFT_SKIP);
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
......
...@@ -127,10 +127,8 @@ int main(int argc, char *argv[]) ...@@ -127,10 +127,8 @@ int main(int argc, char *argv[])
struct ucall uc; struct ucall uc;
int stage; int stage;
if (!nested_svm_supported()) { TEST_REQUIRE(nested_svm_supported());
print_skip("Nested SVM not supported");
exit(KSFT_SKIP);
}
/* Create VM */ /* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_set_hv_cpuid(vcpu); vcpu_set_hv_cpuid(vcpu);
......
...@@ -181,11 +181,7 @@ int main(void) ...@@ -181,11 +181,7 @@ int main(void)
int flags; int flags;
flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK); flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK);
if (!(flags & KVM_CLOCK_REALTIME)) { TEST_REQUIRE(flags & KVM_CLOCK_REALTIME);
print_skip("KVM_CLOCK_REALTIME not supported; flags: %x",
flags);
exit(KSFT_SKIP);
}
check_clocksource(); check_clocksource();
......
...@@ -204,10 +204,7 @@ int main(void) ...@@ -204,10 +204,7 @@ int main(void)
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
struct kvm_vm *vm; struct kvm_vm *vm;
if (!kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID));
print_skip("KVM_CAP_ENFORCE_PV_FEATURE_CPUID not supported");
exit(KSFT_SKIP);
}
vm = vm_create_with_one_vcpu(&vcpu, guest_main); vm = vm_create_with_one_vcpu(&vcpu, guest_main);
......
...@@ -93,15 +93,9 @@ int main(void) ...@@ -93,15 +93,9 @@ int main(void)
{ {
int warnings_before, warnings_after; int warnings_before, warnings_after;
if (!is_intel_cpu()) { TEST_REQUIRE(is_intel_cpu());
print_skip("Must be run on an Intel CPU");
exit(KSFT_SKIP);
}
if (vm_is_unrestricted_guest(NULL)) { TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
print_skip("Unrestricted guest must be disabled");
exit(KSFT_SKIP);
}
warnings_before = get_warnings_count(); warnings_before = get_warnings_count();
......
...@@ -117,16 +117,10 @@ int main(int argc, char *argv[]) ...@@ -117,16 +117,10 @@ int main(int argc, char *argv[])
} }
} }
if (!do_gbpages && !do_maxphyaddr) { __TEST_REQUIRE(do_gbpages || do_maxphyaddr, "No sub-tests selected");
print_skip("No sub-tests selected");
return 0;
}
entry = kvm_get_supported_cpuid_entry(0x80000001); entry = kvm_get_supported_cpuid_entry(0x80000001);
if (!(entry->edx & CPUID_GBPAGES)) { TEST_REQUIRE(entry->edx & CPUID_GBPAGES);
print_skip("1gb hugepages not supported");
return 0;
}
if (do_gbpages) { if (do_gbpages) {
pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n"); pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n");
......
...@@ -70,17 +70,12 @@ int main(int argc, char *argv[]) ...@@ -70,17 +70,12 @@ int main(int argc, char *argv[])
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
struct kvm_vm *vm; struct kvm_vm *vm;
int rv;
uint64_t msr_platform_info; uint64_t msr_platform_info;
/* Tell stdout not to buffer its content */ /* Tell stdout not to buffer its content */
setbuf(stdout, NULL); setbuf(stdout, NULL);
rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO); TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO));
if (!rv) {
print_skip("KVM_CAP_MSR_PLATFORM_INFO not supported");
exit(KSFT_SKIP);
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
......
...@@ -443,39 +443,24 @@ static bool use_amd_pmu(void) ...@@ -443,39 +443,24 @@ static bool use_amd_pmu(void)
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
void (*guest_code)(void) = NULL; void (*guest_code)(void);
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
struct kvm_vm *vm; struct kvm_vm *vm;
int r;
/* Tell stdout not to buffer its content */ /* Tell stdout not to buffer its content */
setbuf(stdout, NULL); setbuf(stdout, NULL);
r = kvm_check_cap(KVM_CAP_PMU_EVENT_FILTER); TEST_REQUIRE(kvm_check_cap(KVM_CAP_PMU_EVENT_FILTER));
if (!r) {
print_skip("KVM_CAP_PMU_EVENT_FILTER not supported");
exit(KSFT_SKIP);
}
if (use_intel_pmu()) TEST_REQUIRE(use_intel_pmu() || use_amd_pmu());
guest_code = intel_guest_code; guest_code = use_intel_pmu() ? intel_guest_code : amd_guest_code;
else if (use_amd_pmu())
guest_code = amd_guest_code;
if (!guest_code) {
print_skip("Don't know how to test this guest PMU");
exit(KSFT_SKIP);
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_init_descriptor_tables(vm); vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu); vcpu_init_descriptor_tables(vcpu);
if (!sanity_check_pmu(vcpu)) { TEST_REQUIRE(sanity_check_pmu(vcpu));
print_skip("Guest PMU is not functional");
exit(KSFT_SKIP);
}
if (use_amd_pmu()) if (use_amd_pmu())
test_amd_deny_list(vcpu); test_amd_deny_list(vcpu);
......
...@@ -123,10 +123,7 @@ static void check_set_bsp_busy(void) ...@@ -123,10 +123,7 @@ static void check_set_bsp_busy(void)
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
if (!kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID));
print_skip("set_boot_cpu_id not available");
return 0;
}
run_vm_bsp(0); run_vm_bsp(0);
run_vm_bsp(1); run_vm_bsp(1);
......
...@@ -400,22 +400,15 @@ int main(int argc, char *argv[]) ...@@ -400,22 +400,15 @@ int main(int argc, char *argv[])
{ {
struct kvm_cpuid_entry2 *cpuid; struct kvm_cpuid_entry2 *cpuid;
if (!kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM) && TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM));
!kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM));
print_skip("Capabilities not available");
exit(KSFT_SKIP);
}
cpuid = kvm_get_supported_cpuid_entry(0x80000000); cpuid = kvm_get_supported_cpuid_entry(0x80000000);
if (cpuid->eax < 0x8000001f) { TEST_REQUIRE(cpuid->eax >= 0x8000001f);
print_skip("AMD memory encryption not available");
exit(KSFT_SKIP);
}
cpuid = kvm_get_supported_cpuid_entry(0x8000001f); cpuid = kvm_get_supported_cpuid_entry(0x8000001f);
if (!(cpuid->eax & X86_FEATURE_SEV)) { TEST_REQUIRE(cpuid->eax & X86_FEATURE_SEV);
print_skip("AMD SEV not available");
exit(KSFT_SKIP);
}
have_sev_es = !!(cpuid->eax & X86_FEATURE_SEV_ES); have_sev_es = !!(cpuid->eax & X86_FEATURE_SEV_ES);
if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) { if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
......
...@@ -94,14 +94,8 @@ int main(int argc, char *argv[]) ...@@ -94,14 +94,8 @@ int main(int argc, char *argv[])
setbuf(stdout, NULL); setbuf(stdout, NULL);
cap = kvm_check_cap(KVM_CAP_SYNC_REGS); cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) { TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
print_skip("KVM_CAP_SYNC_REGS not supported"); TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));
exit(KSFT_SKIP);
}
if ((cap & INVALID_SYNC_FIELD) != 0) {
print_skip("The \"invalid\" field is not invalid");
exit(KSFT_SKIP);
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
......
...@@ -46,15 +46,9 @@ int main(void) ...@@ -46,15 +46,9 @@ int main(void)
vm_vaddr_t vmx_pages_gva; vm_vaddr_t vmx_pages_gva;
struct ucall uc; struct ucall uc;
if (!nested_vmx_supported()) { nested_vmx_check_supported();
print_skip("Nested VMX not supported");
exit(KSFT_SKIP);
}
if (!kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT));
print_skip("KVM_CAP_X86_TRIPLE_FAULT_EVENT not supported");
exit(KSFT_SKIP);
}
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1); vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
......
...@@ -93,10 +93,7 @@ static void *run_vcpu(void *_cpu_nr) ...@@ -93,10 +93,7 @@ static void *run_vcpu(void *_cpu_nr)
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
if (!kvm_has_cap(KVM_CAP_VM_TSC_CONTROL)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_TSC_CONTROL));
print_skip("KVM_CAP_VM_TSC_CONTROL not available");
exit(KSFT_SKIP);
}
vm = vm_create(NR_TEST_VCPUS); vm = vm_create(NR_TEST_VCPUS);
vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ); vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ);
......
...@@ -111,10 +111,8 @@ int main(int argc, char *argv[]) ...@@ -111,10 +111,8 @@ int main(int argc, char *argv[])
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
struct kvm_vm *vm; struct kvm_vm *vm;
if (!is_intel_cpu() || vm_is_unrestricted_guest(NULL)) { TEST_REQUIRE(is_intel_cpu());
print_skip("Must be run with kvm_intel.unrestricted_guest=0"); TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
exit(KSFT_SKIP);
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
get_set_sigalrm_vcpu(vcpu); get_set_sigalrm_vcpu(vcpu);
......
...@@ -116,14 +116,6 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) ...@@ -116,14 +116,6 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE(); GUEST_DONE();
} }
static void tsc_scaling_check_supported(void)
{
if (!kvm_has_cap(KVM_CAP_TSC_CONTROL)) {
print_skip("TSC scaling not supported by the HW");
exit(KSFT_SKIP);
}
}
static void stable_tsc_check_supported(void) static void stable_tsc_check_supported(void)
{ {
FILE *fp; FILE *fp;
...@@ -159,7 +151,7 @@ int main(int argc, char *argv[]) ...@@ -159,7 +151,7 @@ int main(int argc, char *argv[])
uint64_t l2_tsc_freq = 0; uint64_t l2_tsc_freq = 0;
nested_vmx_check_supported(); nested_vmx_check_supported();
tsc_scaling_check_supported(); TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
stable_tsc_check_supported(); stable_tsc_check_supported();
/* /*
......
...@@ -57,7 +57,6 @@ int main(int argc, char *argv[]) ...@@ -57,7 +57,6 @@ int main(int argc, char *argv[])
struct kvm_cpuid2 *cpuid; struct kvm_cpuid2 *cpuid;
struct kvm_cpuid_entry2 *entry_1_0; struct kvm_cpuid_entry2 *entry_1_0;
struct kvm_cpuid_entry2 *entry_a_0; struct kvm_cpuid_entry2 *entry_a_0;
bool pdcm_supported = false;
struct kvm_vm *vm; struct kvm_vm *vm;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
int ret; int ret;
...@@ -71,20 +70,14 @@ int main(int argc, char *argv[]) ...@@ -71,20 +70,14 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
cpuid = kvm_get_supported_cpuid(); cpuid = kvm_get_supported_cpuid();
if (kvm_get_cpuid_max_basic() >= 0xa) { TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xa);
entry_1_0 = kvm_get_supported_cpuid_index(1, 0);
entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0); entry_1_0 = kvm_get_supported_cpuid_index(1, 0);
pdcm_supported = entry_1_0 && !!(entry_1_0->ecx & X86_FEATURE_PDCM); entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0);
eax.full = entry_a_0->eax; TEST_REQUIRE(entry_1_0->ecx & X86_FEATURE_PDCM);
}
if (!pdcm_supported) { eax.full = entry_a_0->eax;
print_skip("MSR_IA32_PERF_CAPABILITIES is not supported by the vCPU"); __TEST_REQUIRE(eax.split.version_id, "PMU is not supported by the vCPU");
exit(KSFT_SKIP);
}
if (!eax.split.version_id) {
print_skip("PMU is not supported by the vCPU");
exit(KSFT_SKIP);
}
/* testcase 1, set capabilities when we have PDCM bit */ /* testcase 1, set capabilities when we have PDCM bit */
vcpu_set_cpuid(vcpu, cpuid); vcpu_set_cpuid(vcpu, cpuid);
......
...@@ -169,10 +169,7 @@ int main(int argc, char *argv[]) ...@@ -169,10 +169,7 @@ int main(int argc, char *argv[])
*/ */
nested_vmx_check_supported(); nested_vmx_check_supported();
if (!kvm_has_cap(KVM_CAP_NESTED_STATE)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
print_skip("KVM_CAP_NESTED_STATE not supported");
exit(KSFT_SKIP);
}
/* Create VM */ /* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
......
...@@ -267,10 +267,7 @@ int main(int argc, char *argv[]) ...@@ -267,10 +267,7 @@ int main(int argc, char *argv[])
have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS); have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
if (!kvm_has_cap(KVM_CAP_NESTED_STATE)) { TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
print_skip("KVM_CAP_NESTED_STATE not available");
exit(KSFT_SKIP);
}
/* /*
* AMD currently does not implement set_nested_state, so for now we * AMD currently does not implement set_nested_state, so for now we
......
...@@ -362,10 +362,7 @@ int main(int argc, char *argv[]) ...@@ -362,10 +362,7 @@ int main(int argc, char *argv[])
!strncmp(argv[1], "--verbose", 10)); !strncmp(argv[1], "--verbose", 10));
int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM); int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
if (!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO) ) { TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO);
print_skip("KVM_XEN_HVM_CONFIG_SHARED_INFO not available");
exit(KSFT_SKIP);
}
bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE); bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE);
bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL); bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
......
...@@ -80,14 +80,12 @@ static void guest_code(void) ...@@ -80,14 +80,12 @@ static void guest_code(void)
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
unsigned int xen_caps;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
struct kvm_vm *vm; struct kvm_vm *vm;
if (!(kvm_check_cap(KVM_CAP_XEN_HVM) & xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) ) { TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
print_skip("KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available");
exit(KSFT_SKIP);
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_set_hv_cpuid(vcpu); vcpu_set_hv_cpuid(vcpu);
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
struct kvm_cpuid_entry2 *entry; struct kvm_cpuid_entry2 *entry;
bool xss_supported = false;
bool xss_in_msr_list; bool xss_in_msr_list;
struct kvm_vm *vm; struct kvm_vm *vm;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
...@@ -29,14 +28,10 @@ int main(int argc, char *argv[]) ...@@ -29,14 +28,10 @@ int main(int argc, char *argv[])
/* Create VM */ /* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, NULL); vm = vm_create_with_one_vcpu(&vcpu, NULL);
if (kvm_get_cpuid_max_basic() >= 0xd) { TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xd);
entry = kvm_get_supported_cpuid_index(0xd, 1);
xss_supported = entry && !!(entry->eax & X86_FEATURE_XSAVES); entry = kvm_get_supported_cpuid_index(0xd, 1);
} TEST_REQUIRE(entry->eax & X86_FEATURE_XSAVES);
if (!xss_supported) {
print_skip("IA32_XSS is not supported by the vCPU");
exit(KSFT_SKIP);
}
xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS); xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS);
TEST_ASSERT(xss_val == 0, TEST_ASSERT(xss_val == 0,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment