Commit 171a90d7 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Separate the memory caches for shadow pages and gfn arrays

Use separate caches for allocating shadow pages versus gfn arrays.  This
sets the stage for specifying __GFP_ZERO when allocating shadow pages
without incurring extra cost for gfn arrays.

No functional change intended.
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-10-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 531281ad
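For readers outside KVM: a kvm_mmu_memory_cache is a small per-vCPU stash of preallocated objects that fault handlers can draw from without hitting the allocator. The sketch below is a minimal userspace model of that pattern and of the split this patch makes; the names and the fixed-capacity layout are hypothetical, not the kernel's API.

/* Minimal userspace model (hypothetical names) of the memory-cache
 * pattern: fill a per-object-type stash ahead of time, pop from it
 * later. This patch's change amounts to keeping two instances, one
 * per object type, instead of one shared instance. */
#include <stdlib.h>

#define CACHE_CAPACITY 16

struct mem_cache {
	size_t obj_size;
	int nobjs;
	void *objects[CACHE_CAPACITY];
};

/* Fill the cache to at least @min objects; 0 on success, -1 on failure. */
static int cache_topup(struct mem_cache *mc, int min)
{
	while (mc->nobjs < min) {
		void *obj = malloc(mc->obj_size);
		if (!obj)
			return -1;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Pop a preallocated object; valid only after a successful topup. */
static void *cache_alloc(struct mem_cache *mc)
{
	return mc->objects[--mc->nobjs];
}

int main(void)
{
	/* One cache per object type, mirroring mmu_shadow_page_cache and
	 * mmu_gfn_array_cache; before this patch both object types shared
	 * one cache. Cleanup of leftover objects is omitted. */
	struct mem_cache shadow_pages = { .obj_size = 4096 };
	struct mem_cache gfn_arrays   = { .obj_size = 4096 };

	if (cache_topup(&shadow_pages, 5) || cache_topup(&gfn_arrays, 5))
		return 1;

	void *spt  = cache_alloc(&shadow_pages);
	void *gfns = cache_alloc(&gfn_arrays);
	(void)spt; (void)gfns;
	return 0;
}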
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -602,7 +602,8 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu *walk_mmu;
 
 	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
-	struct kvm_mmu_memory_cache mmu_page_cache;
+	struct kvm_mmu_memory_cache mmu_shadow_page_cache;
+	struct kvm_mmu_memory_cache mmu_gfn_array_cache;
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
 	/*
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1109,8 +1109,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 				   1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
 	if (r)
 		return r;
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache,
-				   2 * PT64_ROOT_MAX_LEVEL);
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
+				   PT64_ROOT_MAX_LEVEL);
+	if (r)
+		return r;
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+				   PT64_ROOT_MAX_LEVEL);
 	if (r)
 		return r;
 	return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
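The new counts follow from the split: the shared mmu_page_cache reserved 2 * PT64_ROOT_MAX_LEVEL objects because one fault can consume, per level of the page-table walk, a shadow page plus (for indirect pages) a gfn array, while each dedicated cache now only needs PT64_ROOT_MAX_LEVEL. The topup happens here, in sleepable context, so that the fault path can later pop objects under the MMU lock without sleeping or failing. A schematic of that contract, as a userspace sketch with hypothetical names:

/* Schematic (not kernel code) of the reserve-then-consume contract the
 * topup counts serve: allocate while sleeping is allowed, consume under
 * a lock where allocation is forbidden. */
#include <pthread.h>
#include <stdlib.h>

#define MAX_LEVELS 5	/* stands in for PT64_ROOT_MAX_LEVEL */

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static void *reserved[MAX_LEVELS];
static int nreserved;

/* Sleepable context: may call the allocator, may fail. */
static int topup(void)
{
	while (nreserved < MAX_LEVELS) {
		void *obj = malloc(4096);
		if (!obj)
			return -1;
		reserved[nreserved++] = obj;
	}
	return 0;
}

/* Fault path: runs under the lock, so it may only pop reserved objects. */
static void handle_fault(void)
{
	pthread_mutex_lock(&mmu_lock);
	void *shadow_page = reserved[--nreserved];	/* cannot fail */
	(void)shadow_page;
	pthread_mutex_unlock(&mmu_lock);
}

int main(void)
{
	if (topup())
		return 1;
	handle_fault();
	return 0;
}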
@@ -1120,7 +1124,8 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
 	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
-	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
 	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
@@ -2082,9 +2087,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
 	struct kvm_mmu_page *sp;
 
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
+	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
 	if (!direct)
-		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
+		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
 	/*
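And the payoff the commit message points at: once the caches are separate, a zeroing policy can be attached to the shadow-page cache alone, so sp->spt pages arrive pre-zeroed while gfn arrays skip the memset. The sketch below only illustrates that direction; the field name and mechanism are hypothetical, and the real follow-up patch may differ.

/* Hypothetical illustration of the follow-up this enables: a per-cache
 * zeroing flag set only for shadow pages. calloc stands in for
 * allocating with __GFP_ZERO. */
#include <stdlib.h>

struct mem_cache {
	int zero;	/* models recording __GFP_ZERO per cache */
	size_t obj_size;
};

static void *cache_refill_one(struct mem_cache *mc)
{
	return mc->zero ? calloc(1, mc->obj_size) : malloc(mc->obj_size);
}

int main(void)
{
	struct mem_cache shadow_pages = { .zero = 1, .obj_size = 4096 };
	struct mem_cache gfn_arrays   = { .zero = 0, .obj_size = 4096 };

	void *spt  = cache_refill_one(&shadow_pages);	/* zeroed */
	void *gfns = cache_refill_one(&gfn_arrays);	/* contents undefined */

	free(spt);
	free(gfns);
	return 0;
}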