Commit 68c1b3e9 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Open code and drop 'struct kvm_vm' accessors

Drop a variety of 'struct kvm_vm' accessors that wrap a single variable
now that tests can simply reference the variable directly.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 96a96e1a
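
Each dropped helper was a one-line getter over a public 'struct kvm_vm' field, so the conversion is mechanical. As a minimal before/after sketch (the accessor and field names come from the diff below; 'size' and 'npages' are hypothetical locals used only for illustration):

	/* Before: a trivial accessor wrapping a public field... */
	unsigned int vm_get_page_size(struct kvm_vm *vm)
	{
		return vm->page_size;
	}

	/* ...with callers going through the wrapper. */
	size = npages * vm_get_page_size(vm);

	/* After: callers reference the field directly. */
	size = npages * vm->page_size;

Since the tests already see the full 'struct kvm_vm' definition, the indirection adds nothing.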
@@ -221,7 +221,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)

 	perf_test_set_wr_fract(vm, p->wr_fract);

-	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
+	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
 	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
 	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
 	pages_per_slot = host_num_pages / p->slots;
...
@@ -713,21 +713,20 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	vm = create_vm(mode, &vcpu,
 		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);

-	guest_page_size = vm_get_page_size(vm);
+	guest_page_size = vm->page_size;
 	/*
 	 * A little more than 1G of guest page sized pages.  Cover the
 	 * case where the size is not aligned to 64 pages.
 	 */
-	guest_num_pages = (1ul << (DIRTY_MEM_BITS -
-				   vm_get_page_shift(vm))) + 3;
+	guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
 	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);

 	host_page_size = getpagesize();
 	host_num_pages = vm_num_host_pages(mode, guest_num_pages);

 	if (!p->phys_offset) {
-		guest_test_phys_mem = (vm_get_max_gfn(vm) -
-				       guest_num_pages) * guest_page_size;
+		guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
+				      guest_page_size;
 		guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
 	} else {
 		guest_test_phys_mem = p->phys_offset;
...
@@ -592,13 +592,7 @@ static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,

 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

-unsigned int vm_get_page_size(struct kvm_vm *vm);
-unsigned int vm_get_page_shift(struct kvm_vm *vm);
 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
-uint64_t vm_get_max_gfn(struct kvm_vm *vm);
-int vm_get_kvm_fd(struct kvm_vm *vm);
-int vm_get_fd(struct kvm_vm *vm);
 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
 unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
 unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
...
@@ -260,7 +260,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)

 	/* Align down GPA of the testing memslot */
 	if (!p->phys_offset)
-		guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
+		guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
 				      guest_page_size;
 	else
 		guest_test_phys_mem = p->phys_offset;
...
@@ -1827,36 +1827,11 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
 	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
 }

-unsigned int vm_get_page_size(struct kvm_vm *vm)
-{
-	return vm->page_size;
-}
-
-unsigned int vm_get_page_shift(struct kvm_vm *vm)
-{
-	return vm->page_shift;
-}
-
 unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm)
 {
 	return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
 }

-uint64_t vm_get_max_gfn(struct kvm_vm *vm)
-{
-	return vm->max_gfn;
-}
-
-int vm_get_kvm_fd(struct kvm_vm *vm)
-{
-	return vm->kvm_fd;
-}
-
-int vm_get_fd(struct kvm_vm *vm)
-{
-	return vm->fd;
-}
-
 static unsigned int vm_calc_num_pages(unsigned int num_pages,
 				      unsigned int page_shift,
 				      unsigned int new_page_shift,
...
@@ -159,7 +159,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 	pta->vm = vm;

 	/* Put the test region at the top guest physical memory. */
-	region_end_gfn = vm_get_max_gfn(vm) + 1;
+	region_end_gfn = vm->max_gfn + 1;

 #ifdef __x86_64__
 	/*
...
@@ -65,8 +65,7 @@ static void *vcpu_worker(void *data)
 	struct kvm_sregs sregs;
 	struct kvm_regs regs;

-	vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa,
-		      vm_get_page_size(vm));
+	vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);

 	/* Snapshot regs before the first run. */
 	vcpu_regs_get(vcpu, &regs);
@@ -104,7 +103,7 @@ static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
 	TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");

 	nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
-			~((uint64_t)vm_get_page_size(vm) - 1);
+			~((uint64_t)vm->page_size - 1);
 	TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);

 	for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
@@ -220,7 +219,7 @@ int main(int argc, char *argv[])

 	vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);

-	max_gpa = vm_get_max_gfn(vm) << vm_get_page_shift(vm);
+	max_gpa = vm->max_gfn << vm->page_shift;
 	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");

 	fd = kvm_memfd_alloc(slot_size, hugepages);
@@ -230,7 +229,7 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");

 	/* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
-	for (i = 0; i < slot_size; i += vm_get_page_size(vm))
+	for (i = 0; i < slot_size; i += vm->page_size)
 		((uint8_t *)mem)[i] = 0xaa;

 	gpa = 0;
@@ -249,7 +248,7 @@ int main(int argc, char *argv[])
 		for (i = 0; i < slot_size; i += size_1gb)
 			__virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
 #else
-		for (i = 0; i < slot_size; i += vm_get_page_size(vm))
+		for (i = 0; i < slot_size; i += vm->page_size)
 			virt_pg_map(vm, gpa + i, gpa + i);
 #endif
 	}
...
@@ -75,7 +75,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
 	 * Add the dummy memslot just below the perf_test_util memslot, which is
 	 * at the top of the guest physical address space.
 	 */
-	gpa = perf_test_args.gpa - pages * vm_get_page_size(vm);
+	gpa = perf_test_args.gpa - pages * vm->page_size;

 	for (i = 0; i < nr_modifications; i++) {
 		usleep(delay);
...
@@ -121,7 +121,7 @@ void test_hv_cpuid_e2big(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 	if (vcpu)
 		ret = __vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
 	else
-		ret = __kvm_ioctl(vm_get_kvm_fd(vm), KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
+		ret = __kvm_ioctl(vm->kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);

 	TEST_ASSERT(ret == -1 && errno == E2BIG,
 		"%s KVM_GET_SUPPORTED_HV_CPUID didn't fail with -E2BIG when"
...