Commit fb58a9c3 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Optimize MMU page cache lookup for fully direct MMUs

Skip the unsync checks and the write flooding clearing for fully direct
MMUs, which are guaranteed to not have unsync'd or indirect pages (write
flooding detection only applies to indirect pages).  For TDP, this
avoids unnecessary memory reads and writes, and for the write flooding
count it also avoids dirtying a cache line (unsync_child_bitmap itself
consumes a full cache line, i.e. write_flooding_count is guaranteed to
be in a different cache line than parent_ptes).
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200623194027.23135-3-sean.j.christopherson@intel.com>
Reviewed-by: Jon Cargille <jcargill@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ac101b7c
@@ -2460,6 +2460,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     int direct,
 					     unsigned int access)
 {
+	bool direct_mmu = vcpu->arch.mmu->direct_map;
 	union kvm_mmu_page_role role;
 	struct hlist_head *sp_list;
 	unsigned quadrant;
@@ -2475,8 +2476,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	if (role.direct)
 		role.gpte_is_8_bytes = true;
 	role.access = access;
-	if (!vcpu->arch.mmu->direct_map
-	    && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
+	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
@@ -2495,6 +2495,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (sp->role.word != role.word)
 			continue;
 
+		if (direct_mmu)
+			goto trace_get_page;
+
 		if (sp->unsync) {
 			/* The page is good, but __kvm_sync_page might still end
 			 * up zapping it.  If so, break in order to rebuild it.
@@ -2510,6 +2513,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 
 		__clear_sp_write_flooding_count(sp);
+
+trace_get_page:
 		trace_kvm_mmu_get_page(sp, false);
 		goto out;
 	}
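
To make the cache-line argument in the commit message concrete, below is a minimal, self-contained sketch (not kernel code): struct mmu_page_sketch, its field order, and the 64-byte line size are illustrative assumptions rather than the real struct kvm_mmu_page layout. It shows why a 512-bit unsync_child_bitmap, which occupies a full 64-byte cache line, guarantees that write_flooding_count cannot share a cache line with parent_ptes.

/*
 * Stand-alone illustration of the cache-line reasoning: a 512-bit bitmap
 * placed between two fields spans an entire 64-byte cache line, so the
 * fields on either side of it necessarily land in different lines.
 * struct mmu_page_sketch is a hypothetical stand-in for the relevant
 * slice of struct kvm_mmu_page; the real struct has more fields and a
 * kernel-version-dependent layout.
 */
#include <stdio.h>
#include <stddef.h>

#define CACHE_LINE_SIZE 64	/* typical x86 cache-line size */

struct mmu_page_sketch {
	void *parent_ptes;		/* stand-in for the rmap head */
	unsigned int unsync_children;
	unsigned long unsync_child_bitmap[512 / (8 * sizeof(unsigned long))];
	unsigned int write_flooding_count;
};

static size_t cache_line_of(size_t offset)
{
	return offset / CACHE_LINE_SIZE;
}

int main(void)
{
	struct mmu_page_sketch sketch;

	printf("parent_ptes:          offset %3zu, cache line %zu\n",
	       offsetof(struct mmu_page_sketch, parent_ptes),
	       cache_line_of(offsetof(struct mmu_page_sketch, parent_ptes)));
	printf("unsync_child_bitmap:  offset %3zu, size %zu bytes\n",
	       offsetof(struct mmu_page_sketch, unsync_child_bitmap),
	       sizeof(sketch.unsync_child_bitmap));
	printf("write_flooding_count: offset %3zu, cache line %zu\n",
	       offsetof(struct mmu_page_sketch, write_flooding_count),
	       cache_line_of(offsetof(struct mmu_page_sketch, write_flooding_count)));
	return 0;
}

Built with a plain C compiler on x86-64, the sketch reports parent_ptes and write_flooding_count on different cache lines, which is why skipping __clear_sp_write_flooding_count() for direct MMUs saves dirtying a line that the lookup otherwise never needs to touch.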