Commit 2f8b1b53 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Pass memory caches to allocate SPs separately

Refactor kvm_mmu_alloc_shadow_page() to receive the caches from which it
will allocate the various pieces of memory for shadow pages as a
parameter, rather than deriving them from the vcpu pointer. This will be
useful in a future commit where shadow pages are allocated during VM
ioctls for eager page splitting, and thus will use a different set of
caches.
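The change is essentially dependency injection of the memory caches. As a standalone illustration of the pattern (toy code with invented names, not KVM symbols), a minimal sketch:

/*
 * Standalone illustration (invented names, not KVM code): the allocator
 * receives its caches as an explicit parameter, so each caller can
 * supply a different set.
 */
#include <stdio.h>
#include <stdlib.h>

struct cache {
	const char *name;
};

struct page_caches {
	struct cache *header_cache;
	struct cache *data_cache;
};

struct page {
	void *header;
	void *data;
};

static void *cache_alloc(struct cache *cache, size_t size)
{
	printf("alloc %zu bytes from %s\n", size, cache->name);
	return malloc(size);
}

/* Allocates from whichever caches the caller chose, instead of
 * reaching into a context pointer for a fixed set. */
static struct page alloc_page(struct page_caches *caches)
{
	struct page page;

	page.header = cache_alloc(caches->header_cache, 64);
	page.data = cache_alloc(caches->data_cache, 4096);
	return page;
}

int main(void)
{
	struct cache fault_header = { "fault-path header cache" };
	struct cache fault_data = { "fault-path data cache" };
	struct cache ioctl_header = { "ioctl-path header cache" };
	struct cache ioctl_data = { "ioctl-path data cache" };

	struct page_caches fault_caches = { &fault_header, &fault_data };
	struct page_caches ioctl_caches = { &ioctl_header, &ioctl_data };

	/* Same allocator, two different sets of caches. */
	struct page a = alloc_page(&fault_caches);
	struct page b = alloc_page(&ioctl_caches);

	free(a.header); free(a.data);
	free(b.header); free(b.data);
	return 0;
}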

Preemptively pull the caches out all the way to
kvm_mmu_get_shadow_page() since eager page splitting will not be calling
kvm_mmu_alloc_shadow_page() directly.
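For a sense of where this is headed, a hedged sketch of how an eager-page-splitting path might eventually call the split-out helper with its own caches; eager_split_get_sp() and the kvm->arch.split_* caches are hypothetical placeholders, not part of this patch:

/*
 * Hypothetical sketch only: a VM-ioctl path supplying its own caches.
 * The split_* caches and this wrapper do not exist in this patch, and
 * later patches may plumb the pointers differently.
 */
static struct kvm_mmu_page *eager_split_get_sp(struct kvm_vcpu *vcpu,
					       gfn_t gfn,
					       union kvm_mmu_page_role role)
{
	struct shadow_page_caches caches = {
		.page_header_cache = &vcpu->kvm->arch.split_page_header_cache,
		.shadow_page_cache = &vcpu->kvm->arch.split_shadow_page_cache,
		.gfn_array_cache = &vcpu->kvm->arch.split_gfn_array_cache,
	};

	return __kvm_mmu_get_shadow_page(vcpu, &caches, gfn, role);
}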

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220516232138.1783324-11-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent be911771
@@ -2049,17 +2049,25 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
+/* Caches used when allocating a new shadow page. */
+struct shadow_page_caches {
+	struct kvm_mmu_memory_cache *page_header_cache;
+	struct kvm_mmu_memory_cache *shadow_page_cache;
+	struct kvm_mmu_memory_cache *gfn_array_cache;
+};
+
 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
+						       struct shadow_page_caches *caches,
 						       gfn_t gfn,
 						       struct hlist_head *sp_list,
 						       union kvm_mmu_page_role role)
 {
 	struct kvm_mmu_page *sp;
 
-	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
+	sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
 	if (!role.direct)
-		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
+		sp->gfns = kvm_mmu_memory_cache_alloc(caches->gfn_array_cache);
 
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
@@ -2081,7 +2089,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
-static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+						      struct shadow_page_caches *caches,
 						      gfn_t gfn,
 						      union kvm_mmu_page_role role)
 {
@@ -2094,13 +2103,26 @@ static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
 	sp = kvm_mmu_find_shadow_page(vcpu, gfn, sp_list, role);
 	if (!sp) {
 		created = true;
-		sp = kvm_mmu_alloc_shadow_page(vcpu, gfn, sp_list, role);
+		sp = kvm_mmu_alloc_shadow_page(vcpu, caches, gfn, sp_list, role);
 	}
 
 	trace_kvm_mmu_get_page(sp, created);
 	return sp;
 }
 
+static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+						    gfn_t gfn,
+						    union kvm_mmu_page_role role)
+{
+	struct shadow_page_caches caches = {
+		.page_header_cache = &vcpu->arch.mmu_page_header_cache,
+		.shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
+		.gfn_array_cache = &vcpu->arch.mmu_gfn_array_cache,
+	};
+
+	return __kvm_mmu_get_shadow_page(vcpu, &caches, gfn, role);
+}
+
 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, unsigned int access)
 {
 	struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);