Commit c1a33aeb authored by Sean Christopherson, committed by Paolo Bonzini

KVM: arm64: Use common KVM implementation of MMU memory caches

Move to the common MMU memory cache implementation now that the common
code and arm64's existing code are semantically compatible.

No functional change intended.

Cc: Marc Zyngier <maz@kernel.org>
Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-19-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e539451b
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
generic-y += early_ioremap.h generic-y += early_ioremap.h
generic-y += kvm_types.h
generic-y += local64.h generic-y += local64.h
generic-y += mcs_spinlock.h generic-y += mcs_spinlock.h
generic-y += qrwlock.h generic-y += qrwlock.h
......
...@@ -97,18 +97,6 @@ struct kvm_arch { ...@@ -97,18 +97,6 @@ struct kvm_arch {
bool return_nisv_io_abort_to_user; bool return_nisv_io_abort_to_user;
}; };
/* Capacity of the per-vCPU page-fault cache; 40 pages covers one fault. */
#define KVM_NR_MEM_OBJS 40
/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;		/* number of valid entries in objects[] */
	gfp_t gfp_zero;		/* __GFP_ZERO if pages must be pre-zeroed, else 0 */
	void *objects[KVM_NR_MEM_OBJS];	/* stack of preallocated page-sized objects */
};
struct kvm_vcpu_fault_info { struct kvm_vcpu_fault_info {
u32 esr_el2; /* Hyp Syndrom Register */ u32 esr_el2; /* Hyp Syndrom Register */
u64 far_el2; /* Hyp Fault Address Register */ u64 far_el2; /* Hyp Fault Address Register */
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM64_KVM_TYPES_H
#define _ASM_ARM64_KVM_TYPES_H

/*
 * Capacity of the common struct kvm_mmu_memory_cache on arm64; sized so a
 * single stage-2 page fault never needs to allocate (matches the old
 * arm64-local KVM_NR_MEM_OBJS value of 40).
 */
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40

#endif /* _ASM_ARM64_KVM_TYPES_H */
...@@ -124,37 +124,6 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) ...@@ -124,37 +124,6 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
put_page(virt_to_page(pudp)); put_page(virt_to_page(pudp));
} }
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
{
void *page;
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT |
cache->gfp_zero);
if (!page)
return -ENOMEM;
cache->objects[cache->nobjs++] = page;
}
return 0;
}
/* Release every page still held in @mc, leaving the cache empty. */
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs > 0) {
		mc->nobjs--;
		free_page((unsigned long)mc->objects[mc->nobjs]);
	}
}
/*
 * Pop one preallocated page from @mc.  Callers must have topped up the
 * cache beforehand; an empty (or NULL) cache is a hard bug, hence BUG_ON.
 */
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	BUG_ON(!mc || !mc->nobjs);

	mc->nobjs--;
	return mc->objects[mc->nobjs];
}
static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{ {
p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL); p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
...@@ -1131,7 +1100,7 @@ static p4d_t *stage2_get_p4d(struct kvm *kvm, struct kvm_mmu_memory_cache *cache ...@@ -1131,7 +1100,7 @@ static p4d_t *stage2_get_p4d(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
if (stage2_pgd_none(kvm, *pgd)) { if (stage2_pgd_none(kvm, *pgd)) {
if (!cache) if (!cache)
return NULL; return NULL;
p4d = mmu_memory_cache_alloc(cache); p4d = kvm_mmu_memory_cache_alloc(cache);
stage2_pgd_populate(kvm, pgd, p4d); stage2_pgd_populate(kvm, pgd, p4d);
get_page(virt_to_page(pgd)); get_page(virt_to_page(pgd));
} }
...@@ -1149,7 +1118,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache ...@@ -1149,7 +1118,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
if (stage2_p4d_none(kvm, *p4d)) { if (stage2_p4d_none(kvm, *p4d)) {
if (!cache) if (!cache)
return NULL; return NULL;
pud = mmu_memory_cache_alloc(cache); pud = kvm_mmu_memory_cache_alloc(cache);
stage2_p4d_populate(kvm, p4d, pud); stage2_p4d_populate(kvm, p4d, pud);
get_page(virt_to_page(p4d)); get_page(virt_to_page(p4d));
} }
...@@ -1170,7 +1139,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache ...@@ -1170,7 +1139,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
if (stage2_pud_none(kvm, *pud)) { if (stage2_pud_none(kvm, *pud)) {
if (!cache) if (!cache)
return NULL; return NULL;
pmd = mmu_memory_cache_alloc(cache); pmd = kvm_mmu_memory_cache_alloc(cache);
stage2_pud_populate(kvm, pud, pmd); stage2_pud_populate(kvm, pud, pmd);
get_page(virt_to_page(pud)); get_page(virt_to_page(pud));
} }
...@@ -1376,7 +1345,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, ...@@ -1376,7 +1345,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
if (stage2_pud_none(kvm, *pud)) { if (stage2_pud_none(kvm, *pud)) {
if (!cache) if (!cache)
return 0; /* ignore calls from kvm_set_spte_hva */ return 0; /* ignore calls from kvm_set_spte_hva */
pmd = mmu_memory_cache_alloc(cache); pmd = kvm_mmu_memory_cache_alloc(cache);
stage2_pud_populate(kvm, pud, pmd); stage2_pud_populate(kvm, pud, pmd);
get_page(virt_to_page(pud)); get_page(virt_to_page(pud));
} }
...@@ -1401,7 +1370,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, ...@@ -1401,7 +1370,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
if (pmd_none(*pmd)) { if (pmd_none(*pmd)) {
if (!cache) if (!cache)
return 0; /* ignore calls from kvm_set_spte_hva */ return 0; /* ignore calls from kvm_set_spte_hva */
pte = mmu_memory_cache_alloc(cache); pte = kvm_mmu_memory_cache_alloc(cache);
kvm_pmd_populate(pmd, pte); kvm_pmd_populate(pmd, pte);
get_page(virt_to_page(pmd)); get_page(virt_to_page(pmd));
} }
...@@ -1468,7 +1437,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, ...@@ -1468,7 +1437,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
phys_addr_t addr, end; phys_addr_t addr, end;
int ret = 0; int ret = 0;
unsigned long pfn; unsigned long pfn;
struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, }; struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(pa); pfn = __phys_to_pfn(pa);
...@@ -1479,7 +1448,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, ...@@ -1479,7 +1448,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
if (writable) if (writable)
pte = kvm_s2pte_mkwrite(pte); pte = kvm_s2pte_mkwrite(pte);
ret = mmu_topup_memory_cache(&cache, ret = kvm_mmu_topup_memory_cache(&cache,
kvm_mmu_cache_min_pages(kvm)); kvm_mmu_cache_min_pages(kvm));
if (ret) if (ret)
goto out; goto out;
...@@ -1494,7 +1463,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, ...@@ -1494,7 +1463,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
} }
out: out:
mmu_free_memory_cache(&cache); kvm_mmu_free_memory_cache(&cache);
return ret; return ret;
} }
...@@ -1880,7 +1849,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1880,7 +1849,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
mmap_read_unlock(current->mm); mmap_read_unlock(current->mm);
/* We need minimum second+third level pages */ /* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm)); ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
if (ret) if (ret)
return ret; return ret;
...@@ -2303,7 +2272,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) ...@@ -2303,7 +2272,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{ {
mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
} }
phys_addr_t kvm_mmu_get_httbr(void) phys_addr_t kvm_mmu_get_httbr(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment