Commit a12c86c4 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Simplify KVM_ENABLE_CAP helper APIs

Rework the KVM_ENABLE_CAP helpers to take the cap and arg0; literally
every current user, and likely every future user, wants to set 0 or 1
arguments and nothing else.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ac712209
......@@ -156,15 +156,6 @@ static void host_test_cpu_on(void)
kvm_vm_free(vm);
}
static void enable_system_suspend(struct kvm_vm *vm)
{
struct kvm_enable_cap cap = {
.cap = KVM_CAP_ARM_SYSTEM_SUSPEND,
};
vm_enable_cap(vm, &cap);
}
static void guest_test_system_suspend(void)
{
uint64_t ret;
......@@ -183,7 +174,7 @@ static void host_test_system_suspend(void)
struct kvm_vm *vm;
vm = setup_vm(guest_test_system_suspend);
enable_system_suspend(vm);
vm_enable_cap(vm, KVM_CAP_ARM_SYSTEM_SUSPEND, 0);
vcpu_power_off(vm, VCPU_ID_TARGET);
run = vcpu_state(vm, VCPU_ID_SOURCE);
......
......@@ -213,7 +213,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct timespec get_dirty_log_total = (struct timespec){0};
struct timespec vcpu_dirty_total = (struct timespec){0};
struct timespec avg;
struct kvm_enable_cap cap = {};
struct timespec clear_dirty_log_total = (struct timespec){0};
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
......@@ -229,11 +228,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
if (dirty_log_manual_caps) {
cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
cap.args[0] = dirty_log_manual_caps;
vm_enable_cap(vm, &cap);
}
if (dirty_log_manual_caps)
vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
dirty_log_manual_caps);
arch_setup_vm(vm, nr_vcpus);
......
......@@ -217,16 +217,13 @@ static bool clear_log_supported(void)
static void clear_log_create_vm_done(struct kvm_vm *vm)
{
struct kvm_enable_cap cap = {};
u64 manual_caps;
manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
cap.args[0] = manual_caps;
vm_enable_cap(vm, &cap);
vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
}
static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
......
......@@ -231,13 +231,17 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap)
return ret;
}
static inline int __vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
return __vm_ioctl(vm, KVM_ENABLE_CAP, cap);
struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
vm_ioctl(vm, KVM_ENABLE_CAP, cap);
struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
......@@ -363,9 +367,11 @@ void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid);
static inline void vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
struct kvm_enable_cap *cap)
uint32_t cap, uint64_t arg0)
{
vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, cap);
struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
......
......@@ -85,11 +85,7 @@ int kvm_check_cap(long cap)
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
struct kvm_enable_cap cap = { 0 };
cap.cap = KVM_CAP_DIRTY_LOG_RING;
cap.args[0] = ring_size;
vm_enable_cap(vm, &cap);
vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
vm->dirty_ring_size = ring_size;
}
......
......@@ -46,12 +46,8 @@ int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
{
uint16_t evmcs_ver;
struct kvm_enable_cap enable_evmcs_cap = {
.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
.args[0] = (unsigned long)&evmcs_ver
};
vcpu_enable_cap(vm, vcpu_id, &enable_evmcs_cap);
vcpu_enable_cap(vm, vcpu_id, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
(unsigned long)&evmcs_ver);
/* KVM should return supported EVMCS version range */
TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
......
......@@ -161,10 +161,6 @@ static uint64_t process_ucall(struct kvm_vm *vm)
int main(int argc, char *argv[])
{
struct kvm_enable_cap emul_failure_cap = {
.cap = KVM_CAP_EXIT_ON_EMULATION_FAILURE,
.args[0] = 1,
};
struct kvm_cpuid_entry2 *entry;
struct kvm_cpuid2 *cpuid;
struct kvm_vm *vm;
......@@ -192,7 +188,7 @@ int main(int argc, char *argv[])
rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
vm_enable_cap(vm, &emul_failure_cap);
vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
MEM_REGION_GPA, MEM_REGION_SLOT,
......
......@@ -140,15 +140,13 @@ static void test_fix_hypercall(void)
static void test_fix_hypercall_disabled(void)
{
struct kvm_enable_cap cap = {0};
struct kvm_vm *vm;
vm = vm_create_default(VCPU_ID, 0, guest_main);
setup_ud_vector(vm);
cap.cap = KVM_CAP_DISABLE_QUIRKS2;
cap.args[0] = KVM_X86_QUIRK_FIX_HYPERCALL_INSN;
vm_enable_cap(vm, &cap);
vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
ud_expected = true;
sync_global_to_guest(vm, ud_expected);
......
......@@ -182,10 +182,6 @@ static void guest_test_msrs_access(void)
};
struct kvm_cpuid2 *best;
vm_vaddr_t msr_gva;
struct kvm_enable_cap cap = {
.cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
.args = {1}
};
struct msr_data *msr;
while (true) {
......@@ -196,7 +192,7 @@ static void guest_test_msrs_access(void)
msr = addr_gva2hva(vm, msr_gva);
vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
vcpu_enable_cap(vm, VCPU_ID, &cap);
vcpu_enable_cap(vm, VCPU_ID, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
vcpu_set_hv_cpuid(vm, VCPU_ID);
......@@ -337,9 +333,7 @@ static void guest_test_msrs_access(void)
* Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
* capability enabled and guest visible CPUID bit unset.
*/
cap.cap = KVM_CAP_HYPERV_SYNIC2;
cap.args[0] = 0;
vcpu_enable_cap(vm, VCPU_ID, &cap);
vcpu_enable_cap(vm, VCPU_ID, KVM_CAP_HYPERV_SYNIC2, 0);
break;
case 22:
feat.eax |= HV_MSR_SYNIC_AVAILABLE;
......@@ -518,10 +512,6 @@ static void guest_test_hcalls_access(void)
struct kvm_cpuid_entry2 dbg = {
.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
};
struct kvm_enable_cap cap = {
.cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
.args = {1}
};
vm_vaddr_t hcall_page, hcall_params;
struct hcall_data *hcall;
struct kvm_cpuid2 *best;
......@@ -542,7 +532,7 @@ static void guest_test_hcalls_access(void)
memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
vcpu_enable_cap(vm, VCPU_ID, &cap);
vcpu_enable_cap(vm, VCPU_ID, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
vcpu_set_hv_cpuid(vm, VCPU_ID);
......
......@@ -206,7 +206,6 @@ static void enter_guest(struct kvm_vm *vm)
int main(void)
{
struct kvm_enable_cap cap = {0};
struct kvm_cpuid2 *best;
struct kvm_vm *vm;
......@@ -217,9 +216,7 @@ int main(void)
vm = vm_create_default(VCPU_ID, 0, guest_main);
cap.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID;
cap.args[0] = 1;
vcpu_enable_cap(vm, VCPU_ID, &cap);
vcpu_enable_cap(vm, VCPU_ID, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);
best = kvm_get_supported_cpuid();
clear_kvm_cpuid_features(best);
......
......@@ -14,7 +14,6 @@
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_enable_cap cap = { 0 };
int ret;
vm = vm_create(0);
......@@ -23,21 +22,16 @@ int main(int argc, char *argv[])
ret = vm_check_cap(vm, KVM_CAP_MAX_VCPU_ID);
/* Try to set KVM_CAP_MAX_VCPU_ID beyond KVM cap */
cap.cap = KVM_CAP_MAX_VCPU_ID;
cap.args[0] = ret + 1;
ret = __vm_enable_cap(vm, &cap);
ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, ret + 1);
TEST_ASSERT(ret < 0,
"Setting KVM_CAP_MAX_VCPU_ID beyond KVM cap should fail");
/* Set KVM_CAP_MAX_VCPU_ID */
cap.cap = KVM_CAP_MAX_VCPU_ID;
cap.args[0] = MAX_VCPU_ID;
vm_enable_cap(vm, &cap);
vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID);
/* Try to set KVM_CAP_MAX_VCPU_ID again */
cap.args[0] = MAX_VCPU_ID + 1;
ret = __vm_enable_cap(vm, &cap);
ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID + 1);
TEST_ASSERT(ret < 0,
"Setting KVM_CAP_MAX_VCPU_ID multiple times should fail");
......
......@@ -35,22 +35,12 @@ static void guest_code(void)
}
}
static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
{
struct kvm_enable_cap cap = {};
cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
cap.flags = 0;
cap.args[0] = (int)enable;
vm_enable_cap(vm, &cap);
}
static void test_msr_platform_info_enabled(struct kvm_vm *vm)
{
struct kvm_run *run = vcpu_state(vm, VCPU_ID);
struct ucall uc;
set_msr_platform_info_enabled(vm, true);
vm_enable_cap(vm, KVM_CAP_MSR_PLATFORM_INFO, true);
vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
......@@ -69,7 +59,7 @@ static void test_msr_platform_info_disabled(struct kvm_vm *vm)
{
struct kvm_run *run = vcpu_state(vm, VCPU_ID);
set_msr_platform_info_enabled(vm, false);
vm_enable_cap(vm, KVM_CAP_MSR_PLATFORM_INFO, false);
vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
"Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
......
......@@ -360,7 +360,6 @@ static void test_pmu_config_disable(void (*guest_code)(void))
{
int r;
struct kvm_vm *vm;
struct kvm_enable_cap cap = { 0 };
r = kvm_check_cap(KVM_CAP_PMU_CAPABILITY);
if (!(r & KVM_PMU_CAP_DISABLE))
......@@ -368,9 +367,7 @@ static void test_pmu_config_disable(void (*guest_code)(void))
vm = vm_create_without_vcpus(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES);
cap.cap = KVM_CAP_PMU_CAPABILITY;
cap.args[0] = KVM_PMU_CAP_DISABLE;
vm_enable_cap(vm, &cap);
vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);
vm_vcpu_add_default(vm, VCPU_ID, guest_code);
vm_init_descriptor_tables(vm);
......
......@@ -82,12 +82,7 @@ static struct kvm_vm *aux_vm_create(bool with_vcpus)
static int __sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
struct kvm_enable_cap cap = {
.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
.args = { src->fd }
};
return __vm_enable_cap(dst, &cap);
return __vm_enable_cap(dst, KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM, src->fd);
}
......@@ -223,12 +218,7 @@ static void test_sev_migrate_parameters(void)
static int __sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
struct kvm_enable_cap cap = {
.cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
.args = { src->fd }
};
return __vm_enable_cap(dst, &cap);
return __vm_enable_cap(dst, KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, src->fd);
}
......
......@@ -46,11 +46,6 @@ int main(void)
vm_vaddr_t vmx_pages_gva;
struct ucall uc;
struct kvm_enable_cap cap = {
.cap = KVM_CAP_X86_TRIPLE_FAULT_EVENT,
.args = {1}
};
if (!nested_vmx_supported()) {
print_skip("Nested VMX not supported");
exit(KSFT_SKIP);
......@@ -62,7 +57,7 @@ int main(void)
}
vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
vm_enable_cap(vm, &cap);
vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
run = vcpu_state(vm, VCPU_ID);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
......
......@@ -550,11 +550,8 @@ static void run_guest_then_process_ucall_done(struct kvm_vm *vm)
process_ucall_done(vm);
}
static void test_msr_filter_allow(void) {
struct kvm_enable_cap cap = {
.cap = KVM_CAP_X86_USER_SPACE_MSR,
.args[0] = KVM_MSR_EXIT_REASON_FILTER,
};
static void test_msr_filter_allow(void)
{
struct kvm_vm *vm;
int rc;
......@@ -564,7 +561,7 @@ static void test_msr_filter_allow(void) {
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
vm_enable_cap(vm, &cap);
vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
......@@ -673,13 +670,8 @@ static void handle_wrmsr(struct kvm_run *run)
}
}
static void test_msr_filter_deny(void) {
struct kvm_enable_cap cap = {
.cap = KVM_CAP_X86_USER_SPACE_MSR,
.args[0] = KVM_MSR_EXIT_REASON_INVAL |
KVM_MSR_EXIT_REASON_UNKNOWN |
KVM_MSR_EXIT_REASON_FILTER,
};
static void test_msr_filter_deny(void)
{
struct kvm_vm *vm;
struct kvm_run *run;
int rc;
......@@ -691,7 +683,9 @@ static void test_msr_filter_deny(void) {
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
vm_enable_cap(vm, &cap);
vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_INVAL |
KVM_MSR_EXIT_REASON_UNKNOWN |
KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
......@@ -726,11 +720,8 @@ static void test_msr_filter_deny(void) {
kvm_vm_free(vm);
}
static void test_msr_permission_bitmap(void) {
struct kvm_enable_cap cap = {
.cap = KVM_CAP_X86_USER_SPACE_MSR,
.args[0] = KVM_MSR_EXIT_REASON_FILTER,
};
static void test_msr_permission_bitmap(void)
{
struct kvm_vm *vm;
int rc;
......@@ -740,7 +731,7 @@ static void test_msr_permission_bitmap(void) {
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
vm_enable_cap(vm, &cap);
vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment