Commit 063584d4 authored by Igor Mammedov, committed by Paolo Bonzini

kvm: memslots: replace heap sort with an insertion sort pass

memslots is a sorted array.  When a slot is changed, heapsort (lib/sort.c)
would take O(n log n) time to update it; an optimized insertion sort will
only cost O(n) on an array with just one item out of order.

Replace sort() with a custom sort that takes advantage of the memslots
usage pattern and the known position of the changed slot (a standalone
sketch of this pass follows the diff below).

Performance of 128 memslot insertions with gradually increasing
size (the worst case):

      heap sort       custom sort
max:  249747 cycles   2500 cycles

with the custom sort algorithm taking ~98% less time than the
original update.
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1d4e7e3c
@@ -668,31 +668,37 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 	return 0;
 }
 
-static int cmp_memslot(const void *slot1, const void *slot2)
-{
-	struct kvm_memory_slot *s1, *s2;
-
-	s1 = (struct kvm_memory_slot *)slot1;
-	s2 = (struct kvm_memory_slot *)slot2;
-
-	if (s1->npages < s2->npages)
-		return 1;
-	if (s1->npages > s2->npages)
-		return -1;
-
-	return 0;
-}
-
 /*
- * Sort the memslots base on its size, so the larger slots
- * will get better fit.
+ * Insert memslot and re-sort memslots based on their size,
+ * so the larger slots will get better fit. Sorting algorithm
+ * takes advantage of having initially sorted array and
+ * known changed memslot position.
  */
-static void sort_memslots(struct kvm_memslots *slots)
+static void insert_memslot(struct kvm_memslots *slots,
+			   struct kvm_memory_slot *new)
 {
-	int i;
+	int i = slots->id_to_index[new->id];
+	struct kvm_memory_slot *old = id_to_memslot(slots, new->id);
+	struct kvm_memory_slot *mslots = slots->memslots;
 
-	sort(slots->memslots, KVM_MEM_SLOTS_NUM,
-	      sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
+	if (new->npages == old->npages) {
+		*old = *new;
+		return;
+	}
+
+	while (1) {
+		if (i < (KVM_MEM_SLOTS_NUM - 1) &&
+			new->npages < mslots[i + 1].npages) {
+			mslots[i] = mslots[i + 1];
+			i++;
+		} else if (i > 0 && new->npages > mslots[i - 1].npages) {
+			mslots[i] = mslots[i - 1];
+			i--;
+		} else {
+			mslots[i] = *new;
+			break;
+		}
+	}
 
 	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
 		slots->id_to_index[slots->memslots[i].id] = i;
@@ -702,13 +708,7 @@ static void update_memslots(struct kvm_memslots *slots,
 			    struct kvm_memory_slot *new)
 {
 	if (new) {
-		int id = new->id;
-		struct kvm_memory_slot *old = id_to_memslot(slots, id);
-		unsigned long npages = old->npages;
-
-		*old = *new;
-		if (new->npages != npages)
-			sort_memslots(slots);
+		insert_memslot(slots, new);
 	}
 }
 
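For illustration only (an editorial sketch, not part of the commit): the same
single-pass idea outside the kernel, using hypothetical names (struct slot,
NSLOTS, reinsert_slot) in place of the kernel's kvm_memory_slot,
KVM_MEM_SLOTS_NUM and insert_memslot. Starting from the changed element's
known index, the element is bubbled toward the front or the back of an array
kept sorted by size in descending order until both neighbours are in order,
so one update costs at most O(n) moves instead of a full O(n log n) sort.

#include <stdio.h>

#define NSLOTS 8			/* stand-in for KVM_MEM_SLOTS_NUM */

struct slot {				/* stand-in for struct kvm_memory_slot */
	int id;
	unsigned long npages;
};

/*
 * Re-place the element at index i in an array sorted by npages in
 * descending order, assuming every other element is already in order.
 */
static void reinsert_slot(struct slot *slots, int i, struct slot new)
{
	while (1) {
		if (i < NSLOTS - 1 && new.npages < slots[i + 1].npages) {
			slots[i] = slots[i + 1];	/* new slot shrank: shift the hole right */
			i++;
		} else if (i > 0 && new.npages > slots[i - 1].npages) {
			slots[i] = slots[i - 1];	/* new slot grew: shift the hole left */
			i--;
		} else {
			slots[i] = new;			/* hole is at the correct position */
			break;
		}
	}
}

int main(void)
{
	struct slot slots[NSLOTS] = {
		{ 0, 900 }, { 1, 700 }, { 2, 500 }, { 3, 300 },
		{ 4, 200 }, { 5, 100 }, { 6, 50 }, { 7, 0 },
	};
	struct slot grown = { 5, 800 };		/* slot 5 grows from 100 to 800 pages */
	int i;

	reinsert_slot(slots, 5, grown);		/* index 5 is its known old position */

	for (i = 0; i < NSLOTS; i++)
		printf("id=%d npages=%lu\n", slots[i].id, slots[i].npages);
	return 0;
}

With the sample data above, slot 5 moves from index 5 to index 1 in four
element copies, leaving the array in descending order of npages.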