Commit aba85929 authored by James Hogan

KVM: MIPS/MMU: Invalidate stale GVA PTEs on TLBW

Implement invalidation of specific pairs of GVA page table entries in
one or both of the GVA page tables. This is used when existing mappings
are replaced in the guest TLB by emulated TLBWI/TLBWR instructions. Due
to the sharing of page tables in the host kernel range, we should be
careful not to allow host pages to be invalidated.
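
A minimal sketch of the idea (hypothetical wrapper name; the real logic is in
the emulate.c and mmu.c hunks below): each guest TLB entry covers an even/odd
pair of pages, so the pair of PTEs at the VPN2-aligned GVA is cleared, but
only for GVAs below the host kernel segments:

  /* Hypothetical wrapper, simplified from the hunks below */
  static void invalidate_guest_tlb_gva(struct kvm_vcpu *vcpu,
                                       struct kvm_mips_tlb *tlb, bool user)
  {
          /* CKSEG0 and above share page tables with the host kernel */
          if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
                  return;
          /* clears both PTEs of the VPN2-aligned even/odd page pair */
          kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
  }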

Add a helper, kvm_mips_walk_pgd(), which can be used to walk either the GPA
(future patches) or GVA page tables, optionally allocating page tables along
the way when they don't exist.
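
As a usage sketch (hypothetical caller, mirroring the new
kvm_trap_emul_invalidate_gva() below), a lookup-only walk passes a NULL
cache so that missing intermediate tables yield NULL rather than being
allocated:

  pte_t *ptep;

  /* Walk the guest kernel GVA page table; NULL cache = no allocation */
  ptep = kvm_mips_walk_pgd(vcpu->arch.guest_kernel_mm.pgd, NULL, gva);
  if (ptep) {
          /* PTE pair is present, e.g. clear it as the invalidation does */
          ptep[0] = pfn_pte(0, __pgprot(0));
          ptep[1] = pfn_pte(0, __pgprot(0));
  }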

GPA page table walking will need to be protected by the kvm->mmu_lock,
so we also add a small MMU page cache in each KVM VCPU, like that found
for other architectures but smaller. This allows enough pages to be
pre-allocated to handle a single fault without holding the lock,
allowing the helper to run with the lock held without having to handle
allocation failures.
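
This patch only adds the cache structure and its alloc/free helpers; a fill
("topup") step run before taking the lock, as found on other architectures,
could look roughly like this sketch (function name and GFP flags are
illustrative, not part of this patch):

  static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
  {
          /* min must not exceed KVM_NR_MEM_OBJS */
          while (mc->nobjs < min) {
                  void *page = (void *)__get_free_page(GFP_KERNEL);

                  if (!page)
                          return -ENOMEM;
                  mc->objects[mc->nobjs++] = page;
          }
          return 0;
  }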

Using the same mechanism for GVA allows the same code to be used, and
allows it to use the same cache of allocated pages if the GPA walk
didn't need to allocate any new tables.
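
For example, a fault handler could top up the cache once outside
kvm->mmu_lock and let both walks draw from it (illustrative sequence; the
topup helper above and the GPA page table are not part of this patch):

  if (mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, KVM_NR_MEM_OBJS))
          return -ENOMEM;

  spin_lock(&kvm->mmu_lock);
  gpa_pte = kvm_mips_walk_pgd(gpa_pgd, &vcpu->arch.mmu_page_cache, gpa);
  spin_unlock(&kvm->mmu_lock);

  /* the GVA walk reuses whatever the GPA walk didn't consume */
  gva_pte = kvm_mips_walk_pgd(gva_pgd, &vcpu->arch.mmu_page_cache, gva);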

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
parent a31b50d7
@@ -261,6 +261,17 @@ struct kvm_mips_tlb {
         long tlb_lo[2];
 };
 
+#define KVM_NR_MEM_OBJS 4
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+        int nobjs;
+        void *objects[KVM_NR_MEM_OBJS];
+};
+
 #define KVM_MIPS_AUX_FPU 0x1
 #define KVM_MIPS_AUX_MSA 0x2
@@ -327,6 +338,9 @@ struct kvm_vcpu_arch {
         /* Guest ASID of last user mode execution */
         unsigned int last_user_gasid;
 
+        /* Cache some mmu pages needed inside spinlock regions */
+        struct kvm_mmu_memory_cache mmu_page_cache;
+
         int last_sched_cpu;
 
         /* WAIT executed */
@@ -631,6 +645,9 @@ enum kvm_mips_flush {
         KMF_GPA = 0x2,
 };
 void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
+                                  bool user);
 extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                                                            unsigned long gva);
 extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
......
@@ -864,11 +864,17 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
         /* No need to flush for entries which are already invalid */
         if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
                 return;
+        /* Don't touch host kernel page tables or TLB mappings */
+        if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
+                return;
         /* User address space doesn't need flushing for KSeg2/3 changes */
         user = tlb->tlb_hi < KVM_GUEST_KSEG0;
 
         preempt_disable();
 
+        /* Invalidate page table entries */
+        kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
+
         /*
          * Probe the shadow host TLB for the entry being overwritten, if one
          * matches, invalidate it
......
@@ -396,6 +396,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
         kvm_mips_dump_stats(vcpu);
 
+        kvm_mmu_free_memory_caches(vcpu);
         kfree(vcpu->arch.guest_ebase);
         kfree(vcpu->arch.kseg0_commpage);
         kfree(vcpu);
......
@@ -14,6 +14,26 @@
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+        while (mc->nobjs)
+                free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+        void *p;
+
+        BUG_ON(!mc || !mc->nobjs);
+        p = mc->objects[--mc->nobjs];
+        return p;
+}
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
 static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
         struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
@@ -30,6 +50,56 @@ static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
         return cpu_asid(cpu, user_mm);
 }
 
+/**
+ * kvm_mips_walk_pgd() - Walk page table with optional allocation.
+ * @pgd:       Page directory pointer.
+ * @addr:      Address to index page table using.
+ * @cache:     MMU page cache to allocate new page tables from, or NULL.
+ *
+ * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
+ * address @addr. If page tables don't exist for @addr, they will be created
+ * from the MMU cache if @cache is not NULL.
+ *
+ * Returns:    Pointer to pte_t corresponding to @addr.
+ *             NULL if a page table doesn't exist for @addr and !@cache.
+ *             NULL if a page table allocation failed.
+ */
+static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
+                                unsigned long addr)
+{
+        pud_t *pud;
+        pmd_t *pmd;
+
+        pgd += pgd_index(addr);
+        if (pgd_none(*pgd)) {
+                /* Not used on MIPS yet */
+                BUG();
+                return NULL;
+        }
+        pud = pud_offset(pgd, addr);
+        if (pud_none(*pud)) {
+                pmd_t *new_pmd;
+
+                if (!cache)
+                        return NULL;
+                new_pmd = mmu_memory_cache_alloc(cache);
+                pmd_init((unsigned long)new_pmd,
+                         (unsigned long)invalid_pte_table);
+                pud_populate(NULL, pud, new_pmd);
+        }
+        pmd = pmd_offset(pud, addr);
+        if (pmd_none(*pmd)) {
+                pte_t *new_pte;
+
+                if (!cache)
+                        return NULL;
+                new_pte = mmu_memory_cache_alloc(cache);
+                clear_page(new_pte);
+                pmd_populate_kernel(NULL, pmd, new_pte);
+        }
+        return pte_offset(pmd, addr);
+}
+
 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
         int srcu_idx, err = 0;
@@ -81,6 +151,31 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
         return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 }
 
+void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
+                                  bool user)
+{
+        pgd_t *pgdp;
+        pte_t *ptep;
+
+        addr &= PAGE_MASK << 1;
+
+        pgdp = vcpu->arch.guest_kernel_mm.pgd;
+        ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
+        if (ptep) {
+                ptep[0] = pfn_pte(0, __pgprot(0));
+                ptep[1] = pfn_pte(0, __pgprot(0));
+        }
+
+        if (user) {
+                pgdp = vcpu->arch.guest_user_mm.pgd;
+                ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
+                if (ptep) {
+                        ptep[0] = pfn_pte(0, __pgprot(0));
+                        ptep[1] = pfn_pte(0, __pgprot(0));
+                }
+        }
+}
+
 /*
  * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
  * Flush a range of guest physical address space from the VM's GPA page tables.
......