Commit 601f8af0 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Leverage vcpu->last_used_slot for rmap_add and rmap_recycle

rmap_add() and rmap_recycle() both run in the context of the vCPU and
thus we can use kvm_vcpu_gfn_to_memslot() to look up the memslot. This
enables rmap_add() and rmap_recycle() to take advantage of
vcpu->last_used_slot and avoid expensive memslot searching.
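
The fast path behind kvm_vcpu_gfn_to_memslot() was added earlier in this
series. As a rough sketch (not the kernel's exact code; try_get_memslot()
and search_memslots() here are stand-ins for the real helpers), the lookup
checks the vCPU's cached slot index before falling back to a full search:

	/*
	 * Sketch of a last_used_slot-style lookup: hit the cached slot
	 * first, and only search all memslots on a miss, caching the
	 * result for the next lookup.
	 */
	static struct kvm_memory_slot *
	vcpu_gfn_to_memslot_sketch(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
		struct kvm_memory_slot *slot;
		int slot_index;

		/* Fast path: gfns usually cluster in the last slot used. */
		slot = try_get_memslot(slots, vcpu->last_used_slot, gfn);
		if (slot)
			return slot;

		/* Slow path: search all memslots, then cache the hit. */
		slot = search_memslots(slots, gfn, &slot_index);
		if (slot) {
			vcpu->last_used_slot = slot_index;
			return slot;
		}

		return NULL;
	}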

This change improves the performance of "Populate memory time" in
dirty_log_perf_test with tdp_mmu=N. Beyond the raw speedup, "Populate
memory time" no longer scales with the number of memslots in the VM
(-v sets the vCPU count, -x the number of memslots):

Command                         | Before           | After
------------------------------- | ---------------- | -------------
./dirty_log_perf_test -v64 -x1  | 15.18001570s     | 14.99469366s
./dirty_log_perf_test -v64 -x64 | 18.71336392s     | 14.98675076s
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210804222844.1419481-6-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 081de470
@@ -1044,17 +1044,6 @@ static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
 }
 
-static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
-					 struct kvm_mmu_page *sp)
-{
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *slot;
-
-	slots = kvm_memslots_for_spte_role(kvm, sp->role);
-	slot = __gfn_to_memslot(slots, gfn);
-	return __gfn_to_rmap(gfn, sp->role.level, slot);
-}
-
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_memory_cache *mc;
@@ -1065,24 +1054,39 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
 
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
+	struct kvm_memory_slot *slot;
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_head *rmap_head;
 
 	sp = sptep_to_sp(spte);
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
 	return pte_list_add(vcpu, spte, rmap_head);
 }
 
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;
 	struct kvm_mmu_page *sp;
 	gfn_t gfn;
 	struct kvm_rmap_head *rmap_head;
 
 	sp = sptep_to_sp(spte);
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmap_head = gfn_to_rmap(kvm, gfn, sp);
+
+	/*
+	 * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
+	 * context of a vCPU so have to determine which memslots to use based
+	 * on context information in sp->role.
+	 */
+	slots = kvm_memslots_for_spte_role(kvm, sp->role);
+	slot = __gfn_to_memslot(slots, gfn);
+
+	rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
 
 	__pte_list_remove(spte, rmap_head);
 }
@@ -1620,12 +1624,13 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
+	struct kvm_memory_slot *slot;
 	struct kvm_rmap_head *rmap_head;
 	struct kvm_mmu_page *sp;
 
 	sp = sptep_to_sp(spte);
-	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
 
 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
 
 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
...
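
For background on the sp->role fallback in rmap_remove(): KVM maintains a
separate set of memslots per address space (on x86, SMM vs. non-SMM), and a
shadow page's role records which address space it belongs to. A minimal
sketch of the selection, assuming the role's smm bit doubles as the
address-space index, as it did on x86 at the time of this commit:

	static inline struct kvm_memslots *
	memslots_for_spte_role_sketch(struct kvm *kvm,
				      union kvm_mmu_page_role role)
	{
		/* On x86, role.smm selects the SMM memslot address space. */
		return __kvm_memslots(kvm, role.smm);
	}

This is why rmap_remove() cannot simply use vcpu->last_used_slot: without a
vCPU, the role is the only context available for picking the right memslots.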