Commit 378f5cd6 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Skip filling the gfn cache for guaranteed direct MMU topups

Don't bother filling the gfn array cache when the caller is a fully
direct MMU, i.e. won't need a gfn array for shadow pages.
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-13-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 96880883
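
For orientation, here is the shape of mmu_topup_memory_caches() after this patch, consolidated from the hunks below. The topups above the diff context (the pte_list_desc and shadow page caches) are elided rather than reconstructed; only the gfn array cache becomes conditional:

	static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
	{
		int r;

		/* Topups of the pte_list_desc and shadow page caches elided
		 * here; they are unchanged by this patch.
		 */
		...
		/* Only indirect MMUs allocate gfn arrays for their shadow
		 * pages, so a caller that is guaranteed to be fully direct
		 * can skip this cache entirely.
		 */
		if (maybe_indirect) {
			r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
						   PT64_ROOT_MAX_LEVEL);
			if (r)
				return r;
		}
		return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
					      PT64_ROOT_MAX_LEVEL);
	}

As the hunks show, direct_page_fault() passes false (a direct fault never instantiates indirect shadow pages), kvm_mmu_load() passes !vcpu->arch.mmu->direct_map, and the shadow-paging paths pass true.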
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1102,7 +1102,7 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 	}
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
 	int r;
 
@@ -1115,10 +1115,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 				   PT64_ROOT_MAX_LEVEL);
 	if (r)
 		return r;
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
-				   PT64_ROOT_MAX_LEVEL);
-	if (r)
-		return r;
+	if (maybe_indirect) {
+		r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+					   PT64_ROOT_MAX_LEVEL);
+		if (r)
+			return r;
+	}
 	return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
 				      PT64_ROOT_MAX_LEVEL);
 }
@@ -4132,7 +4134,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (fast_page_fault(vcpu, gpa, error_code))
 		return RET_PF_RETRY;
 
-	r = mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu, false);
 	if (r)
 		return r;
 
@@ -5168,7 +5170,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	r = mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
 	if (r)
 		goto out;
 	r = mmu_alloc_roots(vcpu);
@@ -5362,7 +5364,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	 * or not since pte prefetch is skiped if it does not have
 	 * enough objects in the cache.
 	 */
-	mmu_topup_memory_caches(vcpu);
+	mmu_topup_memory_caches(vcpu, true);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -816,7 +816,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 		return RET_PF_EMULATE;
 	}
 
-	r = mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu, true);
 	if (r)
 		return r;
 
@@ -904,7 +904,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 	 * No need to check return value here, rmap_can_add() can
 	 * help us to skip pte prefetch later.
 	 */
-	mmu_topup_memory_caches(vcpu);
+	mmu_topup_memory_caches(vcpu, true);
 
 	if (!VALID_PAGE(root_hpa)) {
 		WARN_ON(1);
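
Why the gfn array is indirect-only: shadow pages for indirect paging carry a gfn array (sp->gfns) recording which guest gfn each SPTE shadows, whereas direct shadow pages can derive the gfn from the page's role and index. A hedged sketch of the consuming allocation path in kvm_mmu_alloc_page() (abridged; helper names approximate for this kernel era):

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	if (!direct)
		/* Only indirect shadow pages need sp->gfns, which is why a
		 * topup with maybe_indirect == false may leave this cache
		 * empty without consequence.
		 */
		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);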