Commit 4f802fe9 authored by Scott Wood, committed by Avi Kivity

KVM: PPC: e500: Track TLB1 entries with a bitmap

Rather than invalidate everything when a TLB1 entry needs to be
taken down, keep track of which host TLB1 entries are used for
a given guest TLB1 entry, and invalidate just those entries.

Based on code from Ashish Kalra <Ashish.Kalra@freescale.com>
and Liu Yu <yu.liu@freescale.com>.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 8fdd21a2
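
The idea, in outline: each guest TLB1 entry gets a 64-bit bitmap of the host TLB1 slots currently backing it (g2h_tlb1_map), and each host slot records which guest entry owns it (h2g_tlb1_rmap) so that a recycled slot can have its stale bit cleared. Below is a minimal standalone sketch of that bookkeeping, using simplified types and hypothetical names (tlb1_track, track_map, track_inval, inval_host_slot); it illustrates the scheme and is not code from the patch.

/*
 * Sketch only: simplified types and hypothetical names, not the patch's code.
 * Assumes at most 64 host TLB1 slots and 16 guest TLB1 entries.
 */
#include <stdint.h>

#define HOST_TLB1_SLOTS    64
#define GUEST_TLB1_ENTRIES 16

struct tlb1_track {
        uint64_t     g2h_map[GUEST_TLB1_ENTRIES];  /* per guest entry: bitmap of host slots */
        unsigned int h2g_rmap[HOST_TLB1_SLOTS];    /* per host slot: owning guest entry */
};

/* Record that host slot 'victim' now backs guest TLB1 entry 'esel'. */
static void track_map(struct tlb1_track *t, unsigned int esel, unsigned int victim)
{
        /* If the slot was owned by another guest entry, drop its stale bit
         * (a no-op when the slot was never used, since that bit is clear). */
        t->g2h_map[t->h2g_rmap[victim]] &= ~(1ULL << victim);

        t->g2h_map[esel] |= 1ULL << victim;
        t->h2g_rmap[victim] = esel;
}

/* Invalidate only the host slots that back guest TLB1 entry 'esel'. */
static void track_inval(struct tlb1_track *t, unsigned int esel,
                        void (*inval_host_slot)(unsigned int))
{
        uint64_t tmp = t->g2h_map[esel];

        while (tmp) {
                unsigned int slot = __builtin_ctzll(tmp); /* lowest set bit */
                inval_host_slot(slot);
                t->h2g_rmap[slot] = 0;
                tmp &= tmp - 1;                           /* clear that bit */
        }
        t->g2h_map[esel] = 0;
}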
@@ -2,6 +2,7 @@
  * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu <yu.liu@freescale.com>
+ *         Ashish Kalra <ashish.kalra@freescale.com>
  *
  * Description:
  * This file is based on arch/powerpc/kvm/44x_tlb.h and
@@ -25,6 +26,7 @@
 #define E500_TLB_VALID 1
 #define E500_TLB_DIRTY 2
+#define E500_TLB_BITMAP 4
 
 struct tlbe_ref {
         pfn_t pfn;
@@ -82,6 +84,9 @@ struct kvmppc_vcpu_e500 {
         struct page **shared_tlb_pages;
         int num_shared_tlb_pages;
 
+        u64 *g2h_tlb1_map;
+        unsigned int *h2g_tlb1_rmap;
+
 #ifdef CONFIG_KVM_E500
         u32 pid[E500_PID_NUM];
@@ -2,6 +2,7 @@
  * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, yu.liu@freescale.com
+ *         Ashish Kalra, ashish.kalra@freescale.com
  *
  * Description:
  * This file is based on arch/powerpc/kvm/44x_tlb.c,
@@ -175,8 +176,28 @@ static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
         struct kvm_book3e_206_tlb_entry *gtlbe =
                 get_entry(vcpu_e500, tlbsel, esel);
 
-        if (tlbsel == 1) {
-                kvmppc_e500_tlbil_all(vcpu_e500);
+        if (tlbsel == 1 &&
+            vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
+                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
+                int hw_tlb_indx;
+                unsigned long flags;
+
+                local_irq_save(flags);
+                while (tmp) {
+                        hw_tlb_indx = __ilog2_u64(tmp & -tmp);
+                        mtspr(SPRN_MAS0,
+                              MAS0_TLBSEL(1) |
+                              MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
+                        mtspr(SPRN_MAS1, 0);
+                        asm volatile("tlbwe");
+                        vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
+                        tmp &= tmp - 1;
+                }
+                mb();
+                vcpu_e500->g2h_tlb1_map[esel] = 0;
+                vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
+                local_irq_restore(flags);
                 return;
         }
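
An aside on the loop above: it walks the set bits of the guest entry's bitmap. tmp & -tmp isolates the lowest set bit, __ilog2_u64() converts it to a host TLB1 slot index, and tmp &= tmp - 1 clears that bit before the next iteration. A minimal standalone illustration of the same idiom follows (plain C, with __builtin_ctzll standing in for __ilog2_u64 on the isolated bit; for_each_set_slot is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

/* Visit the set bits of a 64-bit mask from lowest to highest. */
static void for_each_set_slot(uint64_t mask)
{
        while (mask) {
                /* mask & -mask isolates the lowest set bit;
                 * __builtin_ctzll() returns its index. */
                unsigned int slot = __builtin_ctzll(mask & -mask);
                printf("host TLB1 slot %u\n", slot);
                mask &= mask - 1;       /* clear the lowest set bit */
        }
}

int main(void)
{
        for_each_set_slot(0x29);        /* bits 0, 3 and 5 -> prints slots 0, 3, 5 */
        return 0;
}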
@@ -282,6 +303,16 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
         }
 }
 
+static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+        if (vcpu_e500->g2h_tlb1_map)
+                memset(vcpu_e500->g2h_tlb1_map, 0,
+                       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
+        if (vcpu_e500->h2g_tlb1_rmap)
+                memset(vcpu_e500->h2g_tlb1_rmap, 0,
+                       sizeof(unsigned int) * host_tlb_params[1].entries);
+}
+
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
         int tlbsel = 0;
@@ -511,7 +542,7 @@ static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 /* XXX for both one-one and one-to-many , for now use TLB1 */
 static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
-                struct kvm_book3e_206_tlb_entry *stlbe)
+                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
         struct tlbe_ref *ref;
         unsigned int victim;
@@ -524,6 +555,14 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
         ref = &vcpu_e500->tlb_refs[1][victim];
         kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
 
+        vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
+        vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+        if (vcpu_e500->h2g_tlb1_rmap[victim]) {
+                unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
+                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
+        }
+        vcpu_e500->h2g_tlb1_rmap[victim] = esel;
+
         return victim;
 }
@@ -728,7 +767,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
                  * are mapped on the fly. */
                 stlbsel = 1;
                 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
-                                raddr >> PAGE_SHIFT, gtlbe, &stlbe);
+                                raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
                 break;
         default:
@@ -856,7 +895,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                 stlbsel = 1;
                 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
-                                gtlbe, &stlbe);
+                                gtlbe, &stlbe, esel);
                 break;
         }
@@ -872,6 +911,9 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
         int i;
 
+        clear_tlb1_bitmap(vcpu_e500);
+        kfree(vcpu_e500->g2h_tlb1_map);
+
         clear_tlb_refs(vcpu_e500);
         kfree(vcpu_e500->gtlb_priv[0]);
         kfree(vcpu_e500->gtlb_priv[1]);
@@ -932,6 +974,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
         char *virt;
         struct page **pages;
         struct tlbe_priv *privs[2] = {};
+        u64 *g2h_bitmap = NULL;
         size_t array_len;
         u32 sets;
         int num_pages, ret, i;
@@ -993,10 +1036,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
         if (!privs[0] || !privs[1])
                 goto err_put_page;
 
+        g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
+                             GFP_KERNEL);
+        if (!g2h_bitmap)
+                goto err_put_page;
+
         free_gtlb(vcpu_e500);
         vcpu_e500->gtlb_priv[0] = privs[0];
         vcpu_e500->gtlb_priv[1] = privs[1];
+        vcpu_e500->g2h_tlb1_map = g2h_bitmap;
         vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
                 (virt + (cfg->array & (PAGE_SIZE - 1)));
@@ -1129,6 +1178,18 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
         if (!vcpu_e500->gtlb_priv[1])
                 goto err;
 
+        vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
+                                          vcpu_e500->gtlb_params[1].entries,
+                                          GFP_KERNEL);
+        if (!vcpu_e500->g2h_tlb1_map)
+                goto err;
+
+        vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
+                                           host_tlb_params[1].entries,
+                                           GFP_KERNEL);
+        if (!vcpu_e500->h2g_tlb1_rmap)
+                goto err;
+
         /* Init TLB configuration register */
         vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
                         ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
@@ -1154,6 +1215,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
         free_gtlb(vcpu_e500);
+        kfree(vcpu_e500->h2g_tlb1_rmap);
         kfree(vcpu_e500->tlb_refs[0]);
         kfree(vcpu_e500->tlb_refs[1]);
 }