Commit 914ebccd authored by Takuya Yoshikawa, committed by Avi Kivity

KVM: x86: avoid unnecessary bitmap allocation when memslot is clean

Although we always allocate a new dirty bitmap in x86's get_dirty_log(),
when the memslot is clean it is used only as a zero source for
copy_to_user() and is freed right afterwards. This patch uses
clear_user() instead, avoiding the pointless zero-source allocation.
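
Schematically, the clean-slot path changes as follows (a minimal sketch
using only names that appear in the diff below; locking and error
unwinding are trimmed):

	/* before: allocate and zero a throwaway bitmap just to copy zeros out */
	dirty_bitmap = vmalloc(n);
	if (!dirty_bitmap)
		return -ENOMEM;
	memset(dirty_bitmap, 0, n);
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
		r = -EFAULT;
	vfree(dirty_bitmap);

	/* after: zero the user buffer in place, no allocation at all */
	if (clear_user(log->dirty_bitmap, n))
		r = -EFAULT;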

Performance improvement: as one would expect, the time spent allocating
the bitmap disappears entirely. In my test, the improved ioctl was about
4 to 10 times faster than the original one for clean slots. Furthermore,
doing fewer memory allocations and copies should also be good for the
caches.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent c332c83a
@@ -2797,7 +2797,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	unsigned long n;
 	unsigned long is_dirty = 0;
-	unsigned long *dirty_bitmap = NULL;
 
 	mutex_lock(&kvm->slots_lock);
 
@@ -2812,27 +2811,30 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
 	n = kvm_dirty_bitmap_bytes(memslot);
 
-	r = -ENOMEM;
-	dirty_bitmap = vmalloc(n);
-	if (!dirty_bitmap)
-		goto out;
-	memset(dirty_bitmap, 0, n);
-
 	for (i = 0; !is_dirty && i < n/sizeof(long); i++)
 		is_dirty = memslot->dirty_bitmap[i];
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
 		struct kvm_memslots *slots, *old_slots;
+		unsigned long *dirty_bitmap;
 
 		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 		spin_unlock(&kvm->mmu_lock);
 
-		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-		if (!slots)
-			goto out_free;
+		r = -ENOMEM;
+		dirty_bitmap = vmalloc(n);
+		if (!dirty_bitmap)
+			goto out;
+		memset(dirty_bitmap, 0, n);
+
+		r = -ENOMEM;
+		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+		if (!slots) {
+			vfree(dirty_bitmap);
+			goto out;
+		}
 
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
 
@@ -2841,13 +2843,20 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		synchronize_srcu_expedited(&kvm->srcu);
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
+
+		r = -EFAULT;
+		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
+			vfree(dirty_bitmap);
+			goto out;
+		}
+		vfree(dirty_bitmap);
+	} else {
+		r = -EFAULT;
+		if (clear_user(log->dirty_bitmap, n))
+			goto out;
 	}
 
 	r = 0;
-	if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
-		r = -EFAULT;
-out_free:
-	vfree(dirty_bitmap);
 out:
 	mutex_unlock(&kvm->slots_lock);
 	return r;
...
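
For reference, a hedged userspace sketch of the ioctl this path serves;
KVM_GET_DIRTY_LOG and struct kvm_dirty_log come from <linux/kvm.h>,
while the slot number and bitmap buffer are the caller's to choose:

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Fetch the dirty bitmap of one memory slot. With this patch, a
	 * clean slot costs the kernel only a clear_user() of the caller's
	 * buffer instead of a vmalloc(), memset(), and copy_to_user(). */
	static int get_dirty_log(int vm_fd, __u32 slot, void *bitmap)
	{
		struct kvm_dirty_log log;

		memset(&log, 0, sizeof(log));
		log.slot = slot;            /* slot to query */
		log.dirty_bitmap = bitmap;  /* one bit per page, sized by the caller */

		return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
	}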