Commit cc4f602b authored by Sean Christopherson, committed by Anup Patel

KVM: RISC-V: Use common KVM implementation of MMU memory caches

Use common KVM's implementation of the MMU memory caches, which for all
intents and purposes is semantically identical to RISC-V's version, the
only difference being that the common implementation will fall back to an
atomic allocation if there's a KVM bug that triggers a cache underflow.
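
Illustrative only: the semantic difference described above can be sketched in plain userspace C. Both flavors top up a small per-vCPU stack of pre-zeroed pages ahead of time and pop from it while a spinlock is held; they diverge only when the cache unexpectedly runs dry. Everything below is a hypothetical model, not the kernel code: the names page_cache, cache_topup(), cache_alloc_*() and cache_free() are invented for this sketch, and calloc() stands in for the kernel's page allocator.

/*
 * Hypothetical userspace model of the two cache flavors; calloc() stands in
 * for __get_free_page(GFP_KERNEL | __GFP_ZERO), and the "atomic" fallback is
 * simulated with another plain allocation.  Only the alloc step differs:
 * the common-KVM-style cache survives an underflow, while the old RISC-V
 * cache treated it as a hard failure.
 */
#include <stdio.h>
#include <stdlib.h>

#define CACHE_CAPACITY	32	/* mirrors KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE */
#define PAGE_BYTES	4096

struct page_cache {
	int nobjs;
	void *objects[CACHE_CAPACITY];
};

/* Pre-fill the cache while "sleeping" allocations are still allowed. */
static int cache_topup(struct page_cache *pc, int min)
{
	if (min > CACHE_CAPACITY)
		return -1;
	while (pc->nobjs < min) {
		void *page = calloc(1, PAGE_BYTES);	/* zeroed, like __GFP_ZERO */

		if (!page)
			return -1;
		pc->objects[pc->nobjs++] = page;
	}
	return 0;
}

/* Old RISC-V style: an empty cache is a hard failure. */
static void *cache_alloc_strict(struct page_cache *pc)
{
	return pc->nobjs ? pc->objects[--pc->nobjs] : NULL;
}

/* Common-KVM style: an empty cache falls back to an "atomic" allocation. */
static void *cache_alloc_with_fallback(struct page_cache *pc)
{
	if (!pc->nobjs) {
		fprintf(stderr, "cache underflow, falling back to atomic alloc\n");
		return calloc(1, PAGE_BYTES);
	}
	return pc->objects[--pc->nobjs];
}

/* Release whatever was topped up but never consumed. */
static void cache_free(struct page_cache *pc)
{
	while (pc->nobjs)
		free(pc->objects[--pc->nobjs]);
}

int main(void)
{
	struct page_cache pc = { 0 };

	if (cache_topup(&pc, 3))	/* e.g. one page per stage2 level */
		return 1;

	void *a = cache_alloc_with_fallback(&pc);	/* pops from the cache */
	void *b = cache_alloc_strict(&pc);		/* likewise */

	free(a);
	free(b);
	cache_free(&pc);	/* frees the one leftover page */
	return 0;
}

In the kernel, that fallback path is what the common kvm_mmu_memory_cache_alloc() provides and the old stage2_cache_alloc() lacked.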

RISC-V appears to have based its MMU code on arm64 before the conversion
to the common caches in commit c1a33aeb ("KVM: arm64: Use common KVM
implementation of MMU memory caches"), despite having also copy-pasted
the definition of KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE in kvm_types.h.

Opportunistically drop the superfluous wrapper
kvm_riscv_stage2_flush_cache(), whose name is very, very confusing as
"cache flush" in the context of MMU code almost always refers to flushing
hardware caches, not freeing unused software objects.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Anup Patel <anup.patel@wdc.com>
parent 5e4e84f1
@@ -77,13 +77,6 @@ struct kvm_sbi_context {
 	int return_handled;
 };
 
-#define KVM_MMU_PAGE_CACHE_NR_OBJS	32
-
-struct kvm_mmu_page_cache {
-	int nobjs;
-	void *objects[KVM_MMU_PAGE_CACHE_NR_OBJS];
-};
-
 struct kvm_cpu_trap {
 	unsigned long sepc;
 	unsigned long scause;
@@ -193,7 +186,7 @@ struct kvm_vcpu_arch {
 	struct kvm_sbi_context sbi_context;
 
 	/* Cache pages needed to program page tables with spinlock held */
-	struct kvm_mmu_page_cache mmu_page_cache;
+	struct kvm_mmu_memory_cache mmu_page_cache;
 
 	/* VCPU power-off state */
 	bool power_off;
@@ -220,7 +213,6 @@ void __kvm_riscv_hfence_gvma_all(void);
 int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
-void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu);
 int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
 void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
 void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
...
@@ -2,6 +2,6 @@
 #ifndef _ASM_RISCV_KVM_TYPES_H
 #define _ASM_RISCV_KVM_TYPES_H
 
-#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE	40
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE	32
 
 #endif /* _ASM_RISCV_KVM_TYPES_H */
@@ -83,43 +83,6 @@ static int stage2_level_to_page_size(u32 level, unsigned long *out_pgsize)
 	return 0;
 }
 
-static int stage2_cache_topup(struct kvm_mmu_page_cache *pcache,
-			      int min, int max)
-{
-	void *page;
-
-	BUG_ON(max > KVM_MMU_PAGE_CACHE_NR_OBJS);
-	if (pcache->nobjs >= min)
-		return 0;
-	while (pcache->nobjs < max) {
-		page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-		if (!page)
-			return -ENOMEM;
-		pcache->objects[pcache->nobjs++] = page;
-	}
-
-	return 0;
-}
-
-static void stage2_cache_flush(struct kvm_mmu_page_cache *pcache)
-{
-	while (pcache && pcache->nobjs)
-		free_page((unsigned long)pcache->objects[--pcache->nobjs]);
-}
-
-static void *stage2_cache_alloc(struct kvm_mmu_page_cache *pcache)
-{
-	void *p;
-
-	if (!pcache)
-		return NULL;
-
-	BUG_ON(!pcache->nobjs);
-	p = pcache->objects[--pcache->nobjs];
-
-	return p;
-}
-
 static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
				  pte_t **ptepp, u32 *ptep_level)
 {
@@ -171,7 +134,7 @@ static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
 }
 
 static int stage2_set_pte(struct kvm *kvm, u32 level,
-			  struct kvm_mmu_page_cache *pcache,
+			  struct kvm_mmu_memory_cache *pcache,
			  gpa_t addr, const pte_t *new_pte)
 {
 	u32 current_level = stage2_pgd_levels - 1;
@@ -186,7 +149,9 @@ static int stage2_set_pte(struct kvm *kvm, u32 level,
			return -EEXIST;
 
		if (!pte_val(*ptep)) {
-			next_ptep = stage2_cache_alloc(pcache);
+			if (!pcache)
+				return -ENOMEM;
+			next_ptep = kvm_mmu_memory_cache_alloc(pcache);
			if (!next_ptep)
				return -ENOMEM;
			*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
@@ -209,7 +174,7 @@ static int stage2_set_pte(struct kvm *kvm, u32 level,
 }
 
 static int stage2_map_page(struct kvm *kvm,
-			   struct kvm_mmu_page_cache *pcache,
+			   struct kvm_mmu_memory_cache *pcache,
			   gpa_t gpa, phys_addr_t hpa,
			   unsigned long page_size,
			   bool page_rdonly, bool page_exec)
@@ -384,7 +349,10 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 	int ret = 0;
 	unsigned long pfn;
 	phys_addr_t addr, end;
-	struct kvm_mmu_page_cache pcache = { 0, };
+	struct kvm_mmu_memory_cache pcache;
+
+	memset(&pcache, 0, sizeof(pcache));
+	pcache.gfp_zero = __GFP_ZERO;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
@@ -395,9 +363,7 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
		if (!writable)
			pte = pte_wrprotect(pte);
 
-		ret = stage2_cache_topup(&pcache,
-					 stage2_pgd_levels,
-					 KVM_MMU_PAGE_CACHE_NR_OBJS);
+		ret = kvm_mmu_topup_memory_cache(&pcache, stage2_pgd_levels);
		if (ret)
			goto out;
 
@@ -411,7 +377,7 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 	}
 
 out:
-	stage2_cache_flush(&pcache);
+	kvm_mmu_free_memory_cache(&pcache);
 	return ret;
 }
@@ -649,7 +615,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct vm_area_struct *vma;
 	struct kvm *kvm = vcpu->kvm;
-	struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache;
+	struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
 	bool logging = (memslot->dirty_bitmap &&
			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
 	unsigned long vma_pagesize, mmu_seq;
@@ -684,8 +650,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
 	}
 
 	/* We need minimum second+third level pages */
-	ret = stage2_cache_topup(pcache, stage2_pgd_levels,
-				 KVM_MMU_PAGE_CACHE_NR_OBJS);
+	ret = kvm_mmu_topup_memory_cache(pcache, stage2_pgd_levels);
 	if (ret) {
		kvm_err("Failed to topup stage2 cache\n");
		return ret;
@@ -734,11 +699,6 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
-void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu)
-{
-	stage2_cache_flush(&vcpu->arch.mmu_page_cache);
-}
-
 int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
 {
 	struct page *pgd_page;
...
@@ -77,6 +77,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* Mark this VCPU never ran */
 	vcpu->arch.ran_atleast_once = false;
+	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 
 	/* Setup ISA features available to VCPU */
 	vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;
@@ -107,8 +108,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	/* Cleanup VCPU timer */
 	kvm_riscv_vcpu_timer_deinit(vcpu);
 
-	/* Flush the pages pre-allocated for Stage2 page table mappings */
-	kvm_riscv_stage2_flush_cache(vcpu);
+	/* Free unused pages pre-allocated for Stage2 page table mappings */
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)