Commit 41a23ab3 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: selftests: do not substitute SVM/VMX check with KVM_CAP_NESTED_STATE check

state_test/smm_test use the KVM_CAP_NESTED_STATE check as an indicator for
nested VMX/SVM presence, and this is incorrect. Check for the required
features directly.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200610135847.754289-2-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 77f81f37
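
Note on the rationale: KVM_CAP_NESTED_STATE only advertises that the
KVM_GET/SET_NESTED_STATE ioctls exist; it says nothing about which of SVM or
VMX (if either) KVM can actually expose to a guest. The diff below therefore
introduces nested_svm_supported()/nested_vmx_supported(), backed by
KVM_GET_SUPPORTED_CPUID, and switches the tests over to them. A minimal sketch
of the resulting skip pattern, using only helpers that appear in this commit:

	if (!nested_svm_supported() && !nested_vmx_supported()) {
		print_skip("nested virtualization not available");
		exit(KSFT_SKIP);
	}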
@@ -33,6 +33,7 @@ struct svm_test_data {
 struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+bool nested_svm_supported(void);
 void nested_svm_check_supported(void);
 
 static inline bool cpu_has_svm(void)
@@ -603,6 +603,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
+bool nested_vmx_supported(void);
 void nested_vmx_check_supported(void);
 
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
@@ -148,14 +148,18 @@ void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
 		: "r15", "memory");
 }
 
-void nested_svm_check_supported(void)
+bool nested_svm_supported(void)
 {
 	struct kvm_cpuid_entry2 *entry =
 		kvm_get_supported_cpuid_entry(0x80000001);
 
-	if (!(entry->ecx & CPUID_SVM)) {
+	return entry->ecx & CPUID_SVM;
+}
+
+void nested_svm_check_supported(void)
+{
+	if (!nested_svm_supported()) {
 		print_skip("nested SVM not enabled");
 		exit(KSFT_SKIP);
 	}
 }
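
Aside on the return statement above: entry->ecx & CPUID_SVM evaluates to an
int that is either zero or the raw bit value, and C's conversion to bool
(_Bool) normalizes any nonzero value to 1, so no explicit !! is needed. A
self-contained illustration:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int ecx = 1u << 2;	/* pretend the SVM bit is set */
		bool b = ecx & (1u << 2);	/* conversion to _Bool yields 1 */

		printf("%d\n", b);		/* prints 1, not 4 */
		return 0;
	}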
@@ -379,11 +379,16 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
 	init_vmcs_guest_state(guest_rip, guest_rsp);
 }
 
-void nested_vmx_check_supported(void)
+bool nested_vmx_supported(void)
 {
 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-	if (!(entry->ecx & CPUID_VMX)) {
+	return entry->ecx & CPUID_VMX;
+}
+
+void nested_vmx_check_supported(void)
+{
+	if (!nested_vmx_supported()) {
 		print_skip("nested VMX not enabled");
 		exit(KSFT_SKIP);
 	}
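
For reference, the feature bits being tested are architectural: Intel reports
VMX in CPUID.01H:ECX[5] and AMD reports SVM in CPUID.80000001H:ECX[2]. The
helpers deliberately ask KVM (via KVM_GET_SUPPORTED_CPUID) rather than the
hardware, since a bit present on the host may still be masked from guests. A
hypothetical raw host-side probe, for comparison only and not part of this
commit, using the compiler's __get_cpuid() from <cpuid.h>:

	#include <cpuid.h>
	#include <stdbool.h>

	/* Hypothetical: check the host CPU's own feature bits. */
	static bool host_cpu_has_nested_virt(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) &&
		    (ecx & (1u << 5)))
			return true;	/* VMX: CPUID.01H:ECX[5] */
		if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) &&
		    (ecx & (1u << 2)))
			return true;	/* SVM: CPUID.80000001H:ECX[2] */
		return false;
	}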
@@ -118,16 +118,17 @@ int main(int argc, char *argv[])
 	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
 
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-		if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+		if (nested_svm_supported())
 			vcpu_alloc_svm(vm, &nested_gva);
-		else
+		else if (nested_vmx_supported())
 			vcpu_alloc_vmx(vm, &nested_gva);
-		vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-	} else {
-		pr_info("will skip SMM test with VMX enabled\n");
-		vcpu_args_set(vm, VCPU_ID, 1, 0);
 	}
 
+	if (!nested_gva)
+		pr_info("will skip SMM test with VMX enabled\n");
+
+	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+
 	for (stage = 1;; stage++) {
 		_vcpu_run(vm, VCPU_ID);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
@@ -171,16 +171,17 @@ int main(int argc, char *argv[])
 	vcpu_regs_get(vm, VCPU_ID, &regs1);
 
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-		if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+		if (nested_svm_supported())
 			vcpu_alloc_svm(vm, &nested_gva);
-		else
+		else if (nested_vmx_supported())
 			vcpu_alloc_vmx(vm, &nested_gva);
-		vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-	} else {
-		pr_info("will skip nested state checks\n");
-		vcpu_args_set(vm, VCPU_ID, 1, 0);
 	}
 
+	if (!nested_gva)
+		pr_info("will skip nested state checks\n");
+
+	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+
 	for (stage = 1;; stage++) {
 		_vcpu_run(vm, VCPU_ID);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,