Commit 7c236b81 authored by David Matlack, committed by Paolo Bonzini

KVM: selftests: Create a separate dirty bitmap per slot

The calculation to get the per-slot dirty bitmap was incorrect, leading
to a buffer overrun. Fix it by splitting out the dirty bitmap into a
separate bitmap per slot.

Fixes: 609e6202 ("KVM: selftests: Support multiple slots in dirty_log_perf_test")
Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Message-Id: <20210917173657.44011-4-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9f2fc555
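
The overrun in the old code comes from mixing bit counts with unsigned long
pointer arithmetic: bitmap_zalloc(host_num_pages) reserves only
BITS_TO_LONGS(host_num_pages) longs, but "bitmap + i * slot_pages" advances
the pointer by i * slot_pages longs, i.e. i * slot_pages * BITS_PER_LONG
bits, which lands past the end of the allocation for every slot after the
first whenever more than one slot is used. The standalone sketch below is
illustration only, not part of the patch; the page counts are made up and
BITS_PER_LONG / BITS_TO_LONGS are local stand-ins for the kernel macros:

/* Illustration only: why the old per-slot bitmap arithmetic overran. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long nr_pages = 4096, slots = 4;
	unsigned long slot_pages = nr_pages / slots;
	/* The shared bitmap only holds nr_pages bits worth of longs. */
	unsigned long nr_longs = BITS_TO_LONGS(nr_pages);

	for (unsigned long i = 0; i < slots; i++) {
		/*
		 * The old code advanced the pointer by slot_pages *longs*
		 * per slot, i.e. slot_pages * BITS_PER_LONG bits.
		 */
		unsigned long first_long = i * slot_pages;

		printf("slot %lu: starts at long %lu of %lu%s\n",
		       i, first_long, nr_longs,
		       first_long >= nr_longs ? "  <-- past the end" : "");
	}
	return 0;
}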
@@ -118,42 +118,64 @@ static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
 	toggle_dirty_logging(vm, slots, false);
 }
 
-static void get_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,
-			  uint64_t nr_pages)
+static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
 {
-	uint64_t slot_pages = nr_pages / slots;
 	int i;
 
 	for (i = 0; i < slots; i++) {
 		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
-		unsigned long *slot_bitmap = bitmap + i * slot_pages;
 
-		kvm_vm_get_dirty_log(vm, slot, slot_bitmap);
+		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
 	}
 }
 
-static void clear_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,
-			    uint64_t nr_pages)
+static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
+			    int slots, uint64_t pages_per_slot)
 {
-	uint64_t slot_pages = nr_pages / slots;
 	int i;
 
 	for (i = 0; i < slots; i++) {
 		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
-		unsigned long *slot_bitmap = bitmap + i * slot_pages;
 
-		kvm_vm_clear_dirty_log(vm, slot, slot_bitmap, 0, slot_pages);
+		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
 	}
 }
 
+static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
+{
+	unsigned long **bitmaps;
+	int i;
+
+	bitmaps = malloc(slots * sizeof(bitmaps[0]));
+	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");
+
+	for (i = 0; i < slots; i++) {
+		bitmaps[i] = bitmap_zalloc(pages_per_slot);
+		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
+	}
+
+	return bitmaps;
+}
+
+static void free_bitmaps(unsigned long *bitmaps[], int slots)
+{
+	int i;
+
+	for (i = 0; i < slots; i++)
+		free(bitmaps[i]);
+
+	free(bitmaps);
+}
+
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
 	struct test_params *p = arg;
 	pthread_t *vcpu_threads;
 	struct kvm_vm *vm;
-	unsigned long *bmap;
+	unsigned long **bitmaps;
 	uint64_t guest_num_pages;
 	uint64_t host_num_pages;
+	uint64_t pages_per_slot;
 	int vcpu_id;
 	struct timespec start;
 	struct timespec ts_diff;
@@ -171,7 +193,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
 	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
 	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
-	bmap = bitmap_zalloc(host_num_pages);
+	pages_per_slot = host_num_pages / p->slots;
+
+	bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
 
 	if (dirty_log_manual_caps) {
 		cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
@@ -239,7 +263,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 			  iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
 
 		clock_gettime(CLOCK_MONOTONIC, &start);
-		get_dirty_log(vm, p->slots, bmap, host_num_pages);
+		get_dirty_log(vm, bitmaps, p->slots);
 		ts_diff = timespec_elapsed(start);
 		get_dirty_log_total = timespec_add(get_dirty_log_total,
 						   ts_diff);
@@ -248,7 +272,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
 		if (dirty_log_manual_caps) {
 			clock_gettime(CLOCK_MONOTONIC, &start);
-			clear_dirty_log(vm, p->slots, bmap, host_num_pages);
+			clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
 			ts_diff = timespec_elapsed(start);
 			clear_dirty_log_total = timespec_add(clear_dirty_log_total,
 							     ts_diff);
@@ -281,7 +305,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
 	}
 
-	free(bmap);
+	free_bitmaps(bitmaps, p->slots);
 	free(vcpu_threads);
 	perf_test_destroy_vm(vm);
 }
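
With the fix, each slot owns a bitmap sized for exactly pages_per_slot bits
and consumers simply index bitmaps[i]; no pointer arithmetic can stray into
another slot's data. A minimal sketch of how a caller might walk the
per-slot bitmaps, for example to total the dirty pages reported across
slots (count_dirty_bits and count_all_dirty are hypothetical helpers for
illustration, not functions from the test):

/* Illustration only: hypothetical consumer of the per-slot bitmaps. */
#include <stdint.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

uint64_t count_dirty_bits(const unsigned long *bitmap, uint64_t nr_bits)
{
	uint64_t i, dirty = 0;

	for (i = 0; i < nr_bits; i++)
		if (bitmap[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			dirty++;

	return dirty;
}

uint64_t count_all_dirty(unsigned long *bitmaps[], int slots,
			 uint64_t pages_per_slot)
{
	uint64_t total = 0;
	int i;

	/*
	 * Each slot's bitmap holds exactly pages_per_slot bits, so indexing
	 * bitmaps[i] stays within that slot's own allocation.
	 */
	for (i = 0; i < slots; i++)
		total += count_dirty_bits(bitmaps[i], pages_per_slot);

	return total;
}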