Commit e5bc4d46 authored by Paolo Bonzini

Merge branch 'kvm-selftest' into kvm-master

- Cleanups for the perf test infrastructure and mapping hugepages

- Avoid contention on mmap_sem when the guests start to run

- Add event channel upcall support to xen_shinfo_test
parents 84886c26 e2bd9365
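
For orientation before the per-file hunks: a minimal sketch of how a test drives the reworked perf_test API after this merge, using only the signatures introduced below (the test body itself is a placeholder):

#include "perf_test_util.h"

static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
	/* Per-vCPU test body; the library hands each thread its args. */
}

static void run(enum vm_guest_mode mode, int nr_vcpus, uint64_t mem_bytes)
{
	/* vCPU memory partitioning is now chosen at VM creation time. */
	struct kvm_vm *vm = perf_test_create_vm(mode, nr_vcpus, mem_bytes, 1,
						VM_MEM_SRC_ANONYMOUS,
						true /* partition_vcpu_memory_access */);

	perf_test_set_wr_fract(vm, 1);	/* every guest access is a write */

	/* Threads rendezvous internally before vcpu_worker() is called. */
	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
	perf_test_join_vcpu_threads(nr_vcpus);

	perf_test_destroy_vm(vm);
}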
tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -47,7 +47,7 @@
#include "guest_modes.h"
/* Global variable used to synchronize all of the vCPU threads. */
static int iteration = -1;
static int iteration;
/* Defines what vCPU threads should do during a given iteration. */
static enum {
@@ -215,12 +215,11 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
return true;
}
static void *vcpu_thread_main(void *arg)
static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
{
struct perf_test_vcpu_args *vcpu_args = arg;
struct kvm_vm *vm = perf_test_args.vm;
int vcpu_id = vcpu_args->vcpu_id;
int current_iteration = -1;
int current_iteration = 0;
while (spin_wait_for_next_iteration(&current_iteration)) {
switch (READ_ONCE(iteration_work)) {
@@ -235,8 +234,6 @@ static void *vcpu_thread_main(void *arg)
vcpu_last_completed_iteration[vcpu_id] = current_iteration;
}
return NULL;
}
static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
@@ -277,8 +274,7 @@ static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
const char *description)
{
perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1;
sync_global_to_guest(vm, perf_test_args);
perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? INT_MAX : 1);
iteration_work = ITERATION_ACCESS_MEMORY;
run_iteration(vm, vcpus, description);
}
@@ -296,48 +292,16 @@ static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
run_iteration(vm, vcpus, "Mark memory idle");
}
static pthread_t *create_vcpu_threads(int vcpus)
{
pthread_t *vcpu_threads;
int i;
vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0]));
TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads.");
for (i = 0; i < vcpus; i++) {
vcpu_last_completed_iteration[i] = iteration;
pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main,
&perf_test_args.vcpu_args[i]);
}
return vcpu_threads;
}
static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus)
{
int i;
/* Set done to signal the vCPU threads to exit */
done = true;
for (i = 0; i < vcpus; i++)
pthread_join(vcpu_threads[i], NULL);
}
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *params = arg;
struct kvm_vm *vm;
pthread_t *vcpu_threads;
int vcpus = params->vcpus;
vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes, 1,
params->backing_src);
params->backing_src, !overlap_memory_access);
perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes,
!overlap_memory_access);
vcpu_threads = create_vcpu_threads(vcpus);
perf_test_start_vcpu_threads(vcpus, vcpu_thread_main);
pr_info("\n");
access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");
@@ -352,8 +316,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
mark_memory_idle(vm, vcpus);
access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");
terminate_vcpu_threads(vcpu_threads, vcpus);
free(vcpu_threads);
/* Set done to signal the vCPU threads to exit */
done = true;
perf_test_join_vcpu_threads(vcpus);
perf_test_destroy_vm(vm);
}
tools/testing/selftests/kvm/demand_paging_test.c
@@ -42,10 +42,9 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static size_t demand_paging_size;
static char *guest_data_prototype;
static void *vcpu_worker(void *data)
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
int ret;
struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
int vcpu_id = vcpu_args->vcpu_id;
struct kvm_vm *vm = perf_test_args.vm;
struct kvm_run *run;
@@ -68,8 +67,6 @@ static void *vcpu_worker(void *data)
ts_diff = timespec_elapsed(start);
PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
ts_diff.tv_sec, ts_diff.tv_nsec);
return NULL;
}
static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
@@ -282,7 +279,6 @@ struct test_params {
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
pthread_t *vcpu_threads;
pthread_t *uffd_handler_threads = NULL;
struct uffd_handler_args *uffd_args = NULL;
struct timespec start;
@@ -293,9 +289,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
int r;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
p->src_type);
perf_test_args.wr_fract = 1;
p->src_type, p->partition_vcpu_memory_access);
demand_paging_size = get_backing_src_pagesz(p->src_type);
@@ -304,12 +298,6 @@
"Failed to allocate buffer for guest data pattern");
memset(guest_data_prototype, 0xAB, demand_paging_size);
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
p->partition_vcpu_memory_access);
if (p->uffd_mode) {
uffd_handler_threads =
malloc(nr_vcpus * sizeof(*uffd_handler_threads));
@@ -322,26 +310,15 @@
TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
vm_paddr_t vcpu_gpa;
struct perf_test_vcpu_args *vcpu_args;
void *vcpu_hva;
void *vcpu_alias;
uint64_t vcpu_mem_size;
if (p->partition_vcpu_memory_access) {
vcpu_gpa = guest_test_phys_mem +
(vcpu_id * guest_percpu_mem_size);
vcpu_mem_size = guest_percpu_mem_size;
} else {
vcpu_gpa = guest_test_phys_mem;
vcpu_mem_size = guest_percpu_mem_size * nr_vcpus;
}
PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
/* Cache the host addresses of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
vcpu_alias = addr_gpa2alias(vm, vcpu_gpa);
vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
vcpu_alias = addr_gpa2alias(vm, vcpu_args->gpa);
/*
* Set up user fault fd to handle demand paging
@@ -355,32 +332,18 @@
pipefds[vcpu_id * 2], p->uffd_mode,
p->uffd_delay, &uffd_args[vcpu_id],
vcpu_hva, vcpu_alias,
vcpu_mem_size);
vcpu_args->pages * perf_test_args.guest_page_size);
}
}
/* Export the shared variables to the guest */
sync_global_to_guest(vm, perf_test_args);
pr_info("Finished creating vCPUs and starting uffd threads\n");
clock_gettime(CLOCK_MONOTONIC, &start);
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
&perf_test_args.vcpu_args[vcpu_id]);
}
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
pr_info("Started all vCPUs\n");
/* Wait for the vcpu threads to quit */
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
pthread_join(vcpu_threads[vcpu_id], NULL);
PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id);
}
perf_test_join_vcpu_threads(nr_vcpus);
ts_diff = timespec_elapsed(start);
pr_info("All vCPU threads joined\n");
if (p->uffd_mode) {
@@ -404,7 +367,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
perf_test_destroy_vm(vm);
free(guest_data_prototype);
free(vcpu_threads);
if (p->uffd_mode) {
free(uffd_handler_threads);
free(uffd_args);
tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -31,7 +31,7 @@ static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
static void *vcpu_worker(void *data)
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
int ret;
struct kvm_vm *vm = perf_test_args.vm;
@@ -41,7 +41,6 @@ static void *vcpu_worker(void *data)
struct timespec ts_diff;
struct timespec total = (struct timespec){0};
struct timespec avg;
struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
int vcpu_id = vcpu_args->vcpu_id;
run = vcpu_state(vm, vcpu_id);
@@ -83,8 +82,6 @@ static void *vcpu_worker(void *data)
pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
return NULL;
}
struct test_params {
@@ -170,7 +167,6 @@ static void free_bitmaps(unsigned long *bitmaps[], int slots)
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
pthread_t *vcpu_threads;
struct kvm_vm *vm;
unsigned long **bitmaps;
uint64_t guest_num_pages;
@@ -186,9 +182,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct timespec clear_dirty_log_total = (struct timespec){0};
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
p->slots, p->backing_src);
p->slots, p->backing_src,
p->partition_vcpu_memory_access);
perf_test_args.wr_fract = p->wr_fract;
perf_test_set_wr_fract(vm, p->wr_fract);
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
@@ -203,25 +200,15 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vm_enable_cap(vm, &cap);
}
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
p->partition_vcpu_memory_access);
sync_global_to_guest(vm, perf_test_args);
/* Start the iterations */
iteration = 0;
host_quit = false;
clock_gettime(CLOCK_MONOTONIC, &start);
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
vcpu_last_completed_iteration[vcpu_id] = -1;
pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
&perf_test_args.vcpu_args[vcpu_id]);
}
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
/* Allow the vCPUs to populate memory */
pr_debug("Starting iteration %d - Populating\n", iteration);
@@ -290,8 +277,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Tell the vcpu thread to quit */
host_quit = true;
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
pthread_join(vcpu_threads[vcpu_id], NULL);
perf_test_join_vcpu_threads(nr_vcpus);
avg = timespec_div(get_dirty_log_total, p->iterations);
pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
@@ -306,7 +292,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
}
free_bitmaps(bitmaps, p->slots);
free(vcpu_threads);
perf_test_destroy_vm(vm);
}
tools/testing/selftests/kvm/dirty_log_test.c
@@ -115,7 +115,7 @@ static void guest_code(void)
addr = guest_test_virt_mem;
addr += (READ_ONCE(random_array[i]) % guest_num_pages)
* guest_page_size;
addr &= ~(host_page_size - 1);
addr = align_down(addr, host_page_size);
*(uint64_t *)addr = READ_ONCE(iteration);
}
@@ -737,14 +737,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
if (!p->phys_offset) {
guest_test_phys_mem = (vm_get_max_gfn(vm) -
guest_num_pages) * guest_page_size;
guest_test_phys_mem &= ~(host_page_size - 1);
guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
} else {
guest_test_phys_mem = p->phys_offset;
}
#ifdef __s390x__
/* Align to 1M (segment size) */
guest_test_phys_mem &= ~((1 << 20) - 1);
guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
#endif
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
tools/testing/selftests/kvm/include/perf_test_util.h
@@ -8,6 +8,8 @@
#ifndef SELFTEST_KVM_PERF_TEST_UTIL_H
#define SELFTEST_KVM_PERF_TEST_UTIL_H
#include <pthread.h>
#include "kvm_util.h"
/* Default guest test virtual memory offset */
@@ -18,6 +20,7 @@
#define PERF_TEST_MEM_SLOT_INDEX 1
struct perf_test_vcpu_args {
uint64_t gpa;
uint64_t gva;
uint64_t pages;
@@ -27,7 +30,7 @@ struct perf_test_vcpu_args {
struct perf_test_args {
struct kvm_vm *vm;
uint64_t host_page_size;
uint64_t gpa;
uint64_t guest_page_size;
int wr_fract;
@@ -36,19 +39,15 @@
extern struct perf_test_args perf_test_args;
/*
* Guest physical memory offset of the testing memory slot.
* This will be set to the topmost valid physical address minus
* the test memory size.
*/
extern uint64_t guest_test_phys_mem;
struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src);
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access);
void perf_test_destroy_vm(struct kvm_vm *vm);
void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access);
void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
void perf_test_join_vcpu_threads(int vcpus);
#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
tools/testing/selftests/kvm/include/test_util.h
@@ -104,6 +104,7 @@ size_t get_trans_hugepagesz(void);
size_t get_def_hugetlb_pagesz(void);
const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
size_t get_backing_src_pagesz(uint32_t i);
bool is_backing_src_hugetlb(uint32_t i);
void backing_src_help(const char *flag);
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
long get_run_delay(void);
@@ -117,4 +118,29 @@ static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t)
return vm_mem_backing_src_alias(t)->flag & MAP_SHARED;
}
/* Aligns x up to the next multiple of size. Size must be a power of 2. */
static inline uint64_t align_up(uint64_t x, uint64_t size)
{
uint64_t mask = size - 1;
TEST_ASSERT(size != 0 && !(size & (size - 1)),
"size not a power of 2: %lu", size);
return ((x + mask) & ~mask);
}
static inline uint64_t align_down(uint64_t x, uint64_t size)
{
uint64_t x_aligned_up = align_up(x, size);
if (x == x_aligned_up)
return x;
else
return x_aligned_up - size;
}
static inline void *align_ptr_up(void *x, size_t size)
{
return (void *)align_up((unsigned long)x, size);
}
#endif /* SELFTEST_KVM_TEST_UTIL_H */
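
These helpers centralize the open-coded `x &= ~(size - 1)` masking that the hunks elsewhere in this merge remove from individual tests; for a power-of-2 size, align_down(x, size) computes exactly that mask, and all three helpers assert the power-of-2 precondition. Hand-computed examples:

	align_up(0x1234, 0x1000);		/* = 0x2000 */
	align_down(0x1234, 0x1000);		/* = 0x1000 */
	align_down(0x2000, 0x1000);		/* = 0x2000, already aligned */
	align_ptr_up((void *)0x1234, 0x1000);	/* = (void *)0x2000 */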
tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -280,7 +280,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
#ifdef __s390x__
alignment = max(0x100000, alignment);
#endif
guest_test_phys_mem &= ~(alignment - 1);
guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
/* Set up the shared data structure test_args */
test_args.vm = vm;
tools/testing/selftests/kvm/lib/elf.c
@@ -157,8 +157,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
"memsize of 0,\n"
" phdr index: %u p_memsz: 0x%" PRIx64,
n1, (uint64_t) phdr.p_memsz);
vm_vaddr_t seg_vstart = phdr.p_vaddr;
seg_vstart &= ~(vm_vaddr_t)(vm->page_size - 1);
vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;
seg_vend |= vm->page_size - 1;
size_t seg_size = seg_vend - seg_vstart + 1;
tools/testing/selftests/kvm/lib/kvm_util.c
@@ -22,15 +22,6 @@
static int vcpu_mmap_sz(void);
/* Aligns x up to the next multiple of size. Size must be a power of 2. */
static void *align(void *x, size_t size)
{
size_t mask = size - 1;
TEST_ASSERT(size != 0 && !(size & (size - 1)),
"size not a power of 2: %lu", size);
return (void *) (((size_t) x + mask) & ~mask);
}
int open_path_or_exit(const char *path, int flags)
{
int fd;
@@ -191,15 +182,15 @@
}
const struct vm_guest_mode_params vm_guest_mode_params[] = {
{ 52, 48, 0x1000, 12 },
{ 52, 48, 0x10000, 16 },
{ 48, 48, 0x1000, 12 },
{ 48, 48, 0x10000, 16 },
{ 40, 48, 0x1000, 12 },
{ 40, 48, 0x10000, 16 },
{ 0, 0, 0x1000, 12 },
{ 47, 64, 0x1000, 12 },
{ 44, 64, 0x1000, 12 },
[VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 },
[VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 },
[VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 },
[VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 },
[VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
[VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
[VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
[VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
[VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
@@ -879,9 +870,17 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
alignment = 1;
#endif
/*
* When using THP, mmap is not guaranteed to return a hugepage-aligned
* address so we have to pad the mmap. Padding is not needed for HugeTLB
* because mmap will always return an address aligned to the HugeTLB
* page size.
*/
if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
alignment = max(backing_src_pagesz, alignment);
ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
/* Add enough memory to align up if necessary */
if (alignment > 1)
region->mmap_size += alignment;
@@ -914,8 +913,13 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
"test_malloc failed, mmap_start: %p errno: %i",
region->mmap_start, errno);
TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
"mmap_start %p is not aligned to HugeTLB page size 0x%lx",
region->mmap_start, backing_src_pagesz);
/* Align host address */
region->host_mem = align(region->mmap_start, alignment);
region->host_mem = align_ptr_up(region->mmap_start, alignment);
/* As needed perform madvise */
if ((src_type == VM_MEM_SRC_ANONYMOUS ||
@@ -958,7 +962,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
"mmap of alias failed, errno: %i", errno);
/* Align host alias address */
region->host_alias = align(region->mmap_alias, alignment);
region->host_alias = align_ptr_up(region->mmap_alias, alignment);
}
}
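
The THP comment above is the crux of this hunk: anonymous THP memory is only page-aligned by mmap(), so the region is over-allocated by one alignment unit and the start rounded up with align_ptr_up(), while HugeTLB skips the padding because the kernel hands back an aligned mapping (now asserted via is_backing_src_hugetlb()). A standalone sketch of the pad-then-align pattern, with a hypothetical helper mirroring vm_userspace_mem_region_add():

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/*
 * Over-allocate by one alignment unit, then round the start address up.
 * alignment must be a power of 2. The selftest keeps the raw mmap start
 * around so the whole region can still be munmap()'d later.
 */
static void *mmap_aligned(size_t size, size_t alignment)
{
	void *start = mmap(NULL, size + alignment, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (start == MAP_FAILED)
		return NULL;

	/* Same rounding as align_ptr_up() in test_util.h. */
	return (void *)(((uintptr_t)start + alignment - 1) &
			~((uintptr_t)alignment - 1));
}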
tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -10,21 +10,40 @@
struct perf_test_args perf_test_args;
uint64_t guest_test_phys_mem;
/*
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
struct vcpu_thread {
/* The id of the vCPU. */
int vcpu_id;
/* The pthread backing the vCPU. */
pthread_t thread;
/* Set to true once the vCPU thread is up and running. */
bool running;
};
/* The vCPU threads involved in this test. */
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];
/* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *);
/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;
/*
* Continuously write to the first 8 bytes of each page in the
* specified region.
*/
static void guest_code(uint32_t vcpu_id)
{
struct perf_test_vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
struct perf_test_args *pta = &perf_test_args;
struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_id];
uint64_t gva;
uint64_t pages;
int i;
@@ -37,9 +56,9 @@ static void guest_code(uint32_t vcpu_id)
while (true) {
for (i = 0; i < pages; i++) {
uint64_t addr = gva + (i * perf_test_args.guest_page_size);
uint64_t addr = gva + (i * pta->guest_page_size);
if (i % perf_test_args.wr_fract == 0)
if (i % pta->wr_fract == 0)
*(uint64_t *)addr = 0x0123456789ABCDEF;
else
READ_ONCE(*(uint64_t *)addr);
@@ -49,35 +68,81 @@ static void guest_code(uint32_t vcpu_id)
}
}
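
guest_code() turns wr_fract into an access mix: every wr_fract-th page is written and the rest are only read, so wr_fract = 1 writes every page, while wr_fract = INT_MAX (what ACCESS_READ selects in access_tracking_perf_test) reads everything except page 0, which still takes one write. A standalone worked example of the modulo pattern (values chosen for illustration):

#include <stdio.h>

int main(void)
{
	int wr_fract = 3;	/* hypothetical: write every third page */
	int i;

	for (i = 0; i < 6; i++)
		printf("page %d: %s\n", i,
		       (i % wr_fract == 0) ? "write" : "read");
	/* Prints: write, read, read, write, read, read. */
	return 0;
}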
void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access)
{
struct perf_test_args *pta = &perf_test_args;
struct perf_test_vcpu_args *vcpu_args;
int vcpu_id;
for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
vcpu_args = &pta->vcpu_args[vcpu_id];
vcpu_args->vcpu_id = vcpu_id;
if (partition_vcpu_memory_access) {
vcpu_args->gva = guest_test_virt_mem +
(vcpu_id * vcpu_memory_bytes);
vcpu_args->pages = vcpu_memory_bytes /
pta->guest_page_size;
vcpu_args->gpa = pta->gpa + (vcpu_id * vcpu_memory_bytes);
} else {
vcpu_args->gva = guest_test_virt_mem;
vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
pta->guest_page_size;
vcpu_args->gpa = pta->gpa;
}
vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
vcpu_id, vcpu_args->gpa, vcpu_args->gpa +
(vcpu_args->pages * pta->guest_page_size));
}
}
struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src)
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access)
{
struct perf_test_args *pta = &perf_test_args;
struct kvm_vm *vm;
uint64_t guest_num_pages;
uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
int i;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
perf_test_args.host_page_size = getpagesize();
perf_test_args.guest_page_size = vm_guest_mode_params[mode].page_size;
/* By default vCPUs will write to memory. */
pta->wr_fract = 1;
/*
* Snapshot the non-huge page size. This is used by the guest code to
* access/dirty pages at the logging granularity.
*/
pta->guest_page_size = vm_guest_mode_params[mode].page_size;
guest_num_pages = vm_adjust_num_guest_pages(mode,
(vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size);
(vcpus * vcpu_memory_bytes) / pta->guest_page_size);
TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
"Guest memory size is not host page size aligned.");
TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
TEST_ASSERT(vcpu_memory_bytes % pta->guest_page_size == 0,
"Guest memory size is not guest page size aligned.");
TEST_ASSERT(guest_num_pages % slots == 0,
"Guest memory cannot be evenly divided into %d slots.",
slots);
/*
* Pass guest_num_pages to populate the page tables for test memory.
* The memory is also added to memslot 0, but that's a benign side
* effect as KVM allows aliasing HVAs in memslots.
*/
vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
(vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
0, guest_code, NULL);
guest_num_pages, 0, guest_code, NULL);
perf_test_args.vm = vm;
pta->vm = vm;
/*
* If there should be more memory in the guest test region than there
@@ -90,20 +155,18 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
guest_num_pages, vm_get_max_gfn(vm), vcpus,
vcpu_memory_bytes);
guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
perf_test_args.guest_page_size;
guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
pta->gpa = (vm_get_max_gfn(vm) - guest_num_pages) * pta->guest_page_size;
pta->gpa = align_down(pta->gpa, backing_src_pagesz);
#ifdef __s390x__
/* Align to 1M (segment size) */
guest_test_phys_mem &= ~((1 << 20) - 1);
pta->gpa = align_down(pta->gpa, 1 << 20);
#endif
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
pr_info("guest physical test memory offset: 0x%lx\n", pta->gpa);
/* Add extra memory slots for testing */
for (i = 0; i < slots; i++) {
uint64_t region_pages = guest_num_pages / slots;
vm_paddr_t region_start = guest_test_phys_mem +
region_pages * perf_test_args.guest_page_size * i;
vm_paddr_t region_start = pta->gpa + region_pages * pta->guest_page_size * i;
vm_userspace_mem_region_add(vm, backing_src, region_start,
PERF_TEST_MEM_SLOT_INDEX + i,
@@ -111,10 +174,15 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
}
/* Do mapping for the demand paging memory slot */
virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
virt_map(vm, guest_test_virt_mem, pta->gpa, guest_num_pages);
perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes, partition_vcpu_memory_access);
ucall_init(vm, NULL);
/* Export the shared variables to the guest. */
sync_global_to_guest(vm, perf_test_args);
return vm;
}
@@ -124,36 +192,60 @@ void perf_test_destroy_vm(struct kvm_vm *vm)
kvm_vm_free(vm);
}
void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access)
void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
{
perf_test_args.wr_fract = wr_fract;
sync_global_to_guest(vm, perf_test_args);
}
static void *vcpu_thread_main(void *data)
{
struct vcpu_thread *vcpu = data;
WRITE_ONCE(vcpu->running, true);
/*
* Wait for all vCPU threads to be up and running before calling the test-
* provided vCPU thread function. This prevents thread creation (which
* requires taking the mmap_sem in write mode) from interfering with the
* guest faulting in its memory.
*/
while (!READ_ONCE(all_vcpu_threads_running))
;
vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_id]);
return NULL;
}
void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *))
{
vm_paddr_t vcpu_gpa;
struct perf_test_vcpu_args *vcpu_args;
int vcpu_id;
vcpu_thread_fn = vcpu_fn;
WRITE_ONCE(all_vcpu_threads_running, false);
for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
struct vcpu_thread *vcpu = &vcpu_threads[vcpu_id];
vcpu_args->vcpu_id = vcpu_id;
if (partition_vcpu_memory_access) {
vcpu_args->gva = guest_test_virt_mem +
(vcpu_id * vcpu_memory_bytes);
vcpu_args->pages = vcpu_memory_bytes /
perf_test_args.guest_page_size;
vcpu_gpa = guest_test_phys_mem +
(vcpu_id * vcpu_memory_bytes);
} else {
vcpu_args->gva = guest_test_virt_mem;
vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
perf_test_args.guest_page_size;
vcpu_gpa = guest_test_phys_mem;
}
vcpu->vcpu_id = vcpu_id;
WRITE_ONCE(vcpu->running, false);
vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
}
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
vcpu_id, vcpu_gpa, vcpu_gpa +
(vcpu_args->pages * perf_test_args.guest_page_size));
for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
while (!READ_ONCE(vcpu_threads[vcpu_id].running))
;
}
WRITE_ONCE(all_vcpu_threads_running, true);
}
void perf_test_join_vcpu_threads(int vcpus)
{
int vcpu_id;
for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
pthread_join(vcpu_threads[vcpu_id].thread, NULL);
}
tools/testing/selftests/kvm/lib/test_util.c
@@ -283,6 +283,11 @@ size_t get_backing_src_pagesz(uint32_t i)
}
}
bool is_backing_src_hugetlb(uint32_t i)
{
return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB);
}
static void print_available_backing_src_types(const char *prefix)
{
int i;
tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -36,11 +36,9 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus = true;
static void *vcpu_worker(void *data)
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
int ret;
struct perf_test_vcpu_args *vcpu_args =
(struct perf_test_vcpu_args *)data;
int vcpu_id = vcpu_args->vcpu_id;
struct kvm_vm *vm = perf_test_args.vm;
struct kvm_run *run;
@@ -59,8 +57,6 @@ static void *vcpu_worker(void *data)
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
}
return NULL;
}
struct memslot_antagonist_args {
@@ -80,7 +76,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
* Add the dummy memslot just below the perf_test_util memslot, which is
* at the top of the guest physical address space.
*/
gpa = guest_test_phys_mem - pages * vm_get_page_size(vm);
gpa = perf_test_args.gpa - pages * vm_get_page_size(vm);
for (i = 0; i < nr_modifications; i++) {
usleep(delay);
@@ -100,29 +96,15 @@ struct test_params {
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
pthread_t *vcpu_threads;
struct kvm_vm *vm;
int vcpu_id;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
VM_MEM_SRC_ANONYMOUS);
perf_test_args.wr_fract = 1;
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
p->partition_vcpu_memory_access);
/* Export the shared variables to the guest */
sync_global_to_guest(vm, perf_test_args);
VM_MEM_SRC_ANONYMOUS,
p->partition_vcpu_memory_access);
pr_info("Finished creating vCPUs\n");
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
&perf_test_args.vcpu_args[vcpu_id]);
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
pr_info("Started all vCPUs\n");
@@ -131,16 +113,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
run_vcpus = false;
/* Wait for the vcpu threads to quit */
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
pthread_join(vcpu_threads[vcpu_id], NULL);
perf_test_join_vcpu_threads(nr_vcpus);
pr_info("All vCPU threads joined\n");
ucall_uninit(vm);
kvm_vm_free(vm);
free(vcpu_threads);
perf_test_destroy_vm(vm);
}
static void help(char *name)
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -24,8 +24,12 @@
#define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE)
#define RUNSTATE_ADDR (SHINFO_REGION_GPA + PAGE_SIZE + 0x20)
#define VCPU_INFO_ADDR (SHINFO_REGION_GPA + 0x40)
#define RUNSTATE_VADDR (SHINFO_REGION_GVA + PAGE_SIZE + 0x20)
#define VCPU_INFO_VADDR (SHINFO_REGION_GVA + 0x40)
#define EVTCHN_VECTOR 0x10
static struct kvm_vm *vm;
@@ -56,15 +60,44 @@ struct vcpu_runstate_info {
uint64_t time[4];
};
struct arch_vcpu_info {
unsigned long cr2;
unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
};
struct vcpu_info {
uint8_t evtchn_upcall_pending;
uint8_t evtchn_upcall_mask;
unsigned long evtchn_pending_sel;
struct arch_vcpu_info arch;
struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
#define RUNSTATE_running 0
#define RUNSTATE_runnable 1
#define RUNSTATE_blocked 2
#define RUNSTATE_offline 3
static void evtchn_handler(struct ex_regs *regs)
{
struct vcpu_info *vi = (void *)VCPU_INFO_VADDR;
vi->evtchn_upcall_pending = 0;
GUEST_SYNC(0x20);
}
static void guest_code(void)
{
struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
__asm__ __volatile__(
"sti\n"
"nop\n"
);
/* Trigger an interrupt injection */
GUEST_SYNC(0);
/* Test having the host set runstates manually */
GUEST_SYNC(RUNSTATE_runnable);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0);
@@ -153,7 +186,7 @@ int main(int argc, char *argv[])
struct kvm_xen_vcpu_attr vi = {
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
.u.gpa = SHINFO_REGION_GPA + 0x40,
.u.gpa = VCPU_INFO_ADDR,
};
vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &vi);
@@ -163,6 +196,16 @@ int main(int argc, char *argv[])
};
vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &pvclock);
struct kvm_xen_hvm_attr vec = {
.type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
.u.vector = EVTCHN_VECTOR,
};
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler);
if (do_runstate_tests) {
struct kvm_xen_vcpu_attr st = {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
@@ -171,9 +214,14 @@ int main(int argc, char *argv[])
vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &st);
}
struct vcpu_info *vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR);
vinfo->evtchn_upcall_pending = 0;
struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR);
rs->state = 0x5a;
bool evtchn_irq_expected = false;
for (;;) {
volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
struct ucall uc;
@@ -193,16 +241,21 @@ int main(int argc, char *argv[])
struct kvm_xen_vcpu_attr rst;
long rundelay;
/* If no runstate support, bail out early */
if (!do_runstate_tests)
goto done;
TEST_ASSERT(rs->state_entry_time == rs->time[0] +
rs->time[1] + rs->time[2] + rs->time[3],
"runstate times don't add up");
if (do_runstate_tests)
TEST_ASSERT(rs->state_entry_time == rs->time[0] +
rs->time[1] + rs->time[2] + rs->time[3],
"runstate times don't add up");
switch (uc.args[1]) {
case RUNSTATE_running...RUNSTATE_offline:
case 0:
evtchn_irq_expected = true;
vinfo->evtchn_upcall_pending = 1;
break;
case RUNSTATE_runnable...RUNSTATE_offline:
TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen");
if (!do_runstate_tests)
goto done;
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
rst.u.runstate.state = uc.args[1];
vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &rst);
@@ -236,6 +289,10 @@ int main(int argc, char *argv[])
sched_yield();
} while (get_run_delay() < rundelay);
break;
case 0x20:
TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ");
evtchn_irq_expected = false;
break;
}
break;
}
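
Taken together, the event channel hunks implement a four-step handshake. The walkthrough below is stitched from the code above and is not a standalone program (vm_ioctl(), vm_install_exception_handler(), vinfo, and evtchn_irq_expected are all defined in the hunks):

/* 1. Host: route Xen event channel upcalls to a guest IDT vector. */
struct kvm_xen_hvm_attr vec = {
	.type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
	.u.vector = EVTCHN_VECTOR,
};
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec);

/* 2. Host: install the guest-side handler for that vector. */
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler);

/* 3. Host, on GUEST_SYNC(0): mark an upcall pending; KVM injects the
 * vector once the guest runs with interrupts enabled ("sti" above). */
vinfo->evtchn_upcall_pending = 1;
evtchn_irq_expected = true;

/* 4. Guest handler: ack the upcall and report GUEST_SYNC(0x20), which
 * the host loop checks against evtchn_irq_expected. */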