Commit d4ae84a0 authored by Igor Mammedov, committed by Paolo Bonzini

kvm: search_memslots: add simple LRU memslot caching

In a typical guest boot workload only 2-3 memslots are used
extensively, and most lookups repeatedly hit the same memslot.

Adding an LRU cache improves the average lookup time from
46 to 28 cycles (~40%) for this workload.
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7f379cff
include/linux/kvm_host.h

@@ -353,6 +353,7 @@ struct kvm_memslots {
 	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
 	/* The mapping table from slot id to the index in memslots[]. */
 	short id_to_index[KVM_MEM_SLOTS_NUM];
+	atomic_t lru_slot;
 };
 
 struct kvm {
@@ -790,13 +791,20 @@ static inline void kvm_guest_exit(void)
 static inline struct kvm_memory_slot *
 search_memslots(struct kvm_memslots *slots, gfn_t gfn)
 {
-	struct kvm_memory_slot *memslot;
+	int slot = atomic_read(&slots->lru_slot);
+	struct kvm_memory_slot *memslot = &slots->memslots[slot];
+
+	if (gfn >= memslot->base_gfn &&
+	    gfn < memslot->base_gfn + memslot->npages)
+		return memslot;
 
 	kvm_for_each_memslot(memslot, slots)
 		if (gfn >= memslot->base_gfn &&
-		    gfn < memslot->base_gfn + memslot->npages)
+		    gfn < memslot->base_gfn + memslot->npages) {
+			atomic_set(&slots->lru_slot, memslot - slots->memslots);
 			return memslot;
+		}
 
 	return NULL;
 }
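For illustration, here is a minimal, stand-alone sketch of the same cached-slot lookup idea. It uses hypothetical types and names (slot, slot_table, lookup) rather than the kernel's, and a plain int instead of the atomic_t the patch uses for lockless concurrent readers; it is a single-threaded sketch of the pattern, not the kernel implementation.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel types; names are illustrative only. */
typedef unsigned long long gfn_t;

struct slot {
	gfn_t base_gfn;   /* first guest frame number covered by the slot */
	gfn_t npages;     /* number of pages in the slot */
};

#define NR_SLOTS 8

struct slot_table {
	struct slot slots[NR_SLOTS];
	int used;       /* number of valid entries in slots[] */
	int lru_slot;   /* index of the slot that matched last time */
};

/*
 * Same shape as the patched search_memslots(): try the cached slot first,
 * fall back to a linear scan, and remember whichever slot matched.
 */
static struct slot *lookup(struct slot_table *t, gfn_t gfn)
{
	struct slot *s = &t->slots[t->lru_slot];
	int i;

	if (gfn >= s->base_gfn && gfn < s->base_gfn + s->npages)
		return s;

	for (i = 0; i < t->used; i++) {
		s = &t->slots[i];
		if (gfn >= s->base_gfn && gfn < s->base_gfn + s->npages) {
			t->lru_slot = i;
			return s;
		}
	}
	return NULL;
}

int main(void)
{
	struct slot_table t = {
		.slots = { { 0x0, 256 }, { 0x100000, 4096 } },
		.used = 2,
		.lru_slot = 0,
	};

	/* First lookup in the second slot scans and caches its index ... */
	lookup(&t, 0x100010);
	/* ... so repeated lookups nearby skip the scan entirely. */
	lookup(&t, 0x100020);
	printf("cached slot index: %d\n", t.lru_slot);
	return 0;
}

Note that a stale cached index is harmless with this scheme: the range check is always redone on the cached slot, so a wrong hit cannot happen, and a miss only costs falling back to the full scan.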