Commit 3f9d4f5a authored by David Gibson, committed by Paul Mackerras

KVM: PPC: Book3S HV: Gather HPT related variables into sub-structure

Currently, the powerpc kvm_arch structure contains a number of variables
tracking the state of the guest's hashed page table (HPT) in KVM HV.  This
patch gathers them all together into a single kvm_hpt_info substructure.
This makes life more convenient for the upcoming HPT resizing
implementation.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent db9a290d
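
Not part of the patch: a minimal C sketch of the access-pattern change the diff below makes at every call site. The helper name hpte_for_index() is hypothetical and used only for illustration; the real code open-codes the address arithmetic.

/* Illustrative sketch only -- hpte_for_index() is a hypothetical helper, not in the patch. */
static inline __be64 *hpte_for_index(struct kvm *kvm, unsigned long index)
{
        /* before: (__be64 *)(kvm->arch.hpt_virt + (index << 4))               */
        /* after:  the HPT state is reached through the kvm_hpt_info
         *         sub-structure; each HPTE is 16 bytes, hence index << 4      */
        return (__be64 *)(kvm->arch.hpt.virt + (index << 4));
}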
@@ -241,12 +241,24 @@ struct kvm_arch_memory_slot {
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
+struct kvm_hpt_info {
+        /* Host virtual (linear mapping) address of guest HPT */
+        unsigned long virt;
+        /* Array of reverse mapping entries for each guest HPTE */
+        struct revmap_entry *rev;
+        unsigned long npte;
+        unsigned long mask;
+        /* Guest HPT size is 2**(order) bytes */
+        u32 order;
+        /* 1 if HPT allocated with CMA, 0 otherwise */
+        int cma;
+};
 struct kvm_arch {
         unsigned int lpid;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
         unsigned int tlb_sets;
-        unsigned long hpt_virt;
-        struct revmap_entry *revmap;
+        struct kvm_hpt_info hpt;
         atomic64_t mmio_update;
         unsigned int host_lpid;
         unsigned long host_lpcr;
@@ -256,15 +268,11 @@ struct kvm_arch {
         unsigned long lpcr;
         unsigned long vrma_slb_v;
         int hpte_setup_done;
-        u32 hpt_order;
         atomic_t vcpus_running;
         u32 online_vcores;
-        unsigned long hpt_npte;
-        unsigned long hpt_mask;
         atomic_t hpte_mod_interest;
         cpumask_t need_tlb_flush;
         cpumask_t cpu_in_guest;
-        int hpt_cma_alloc;
         u8 radix;
         pgd_t *pgtable;
         u64 process_table;
...
@@ -61,12 +61,12 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
                 order = PPC_MIN_HPT_ORDER;
         }
-        kvm->arch.hpt_cma_alloc = 0;
+        kvm->arch.hpt.cma = 0;
         page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
         if (page) {
                 hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
                 memset((void *)hpt, 0, (1ul << order));
-                kvm->arch.hpt_cma_alloc = 1;
+                kvm->arch.hpt.cma = 1;
         }
         /* Lastly try successively smaller sizes from the page allocator */
@@ -81,22 +81,22 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
         if (!hpt)
                 return -ENOMEM;
-        kvm->arch.hpt_virt = hpt;
-        kvm->arch.hpt_order = order;
+        kvm->arch.hpt.virt = hpt;
+        kvm->arch.hpt.order = order;
         /* HPTEs are 2**4 bytes long */
-        kvm->arch.hpt_npte = 1ul << (order - 4);
+        kvm->arch.hpt.npte = 1ul << (order - 4);
         /* 128 (2**7) bytes in each HPTEG */
-        kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
+        kvm->arch.hpt.mask = (1ul << (order - 7)) - 1;
         atomic64_set(&kvm->arch.mmio_update, 0);
         /* Allocate reverse map array */
-        rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
+        rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt.npte);
         if (!rev) {
                 pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
                 goto out_freehpt;
         }
-        kvm->arch.revmap = rev;
+        kvm->arch.hpt.rev = rev;
         kvm->arch.sdr1 = __pa(hpt) | (order - 18);
         pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
@@ -107,7 +107,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
         return 0;
  out_freehpt:
-        if (kvm->arch.hpt_cma_alloc)
+        if (kvm->arch.hpt.cma)
                 kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
         else
                 free_pages(hpt, order - PAGE_SHIFT);
@@ -132,10 +132,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
                         goto out;
                 }
         }
-        if (kvm->arch.hpt_virt) {
-                order = kvm->arch.hpt_order;
+        if (kvm->arch.hpt.virt) {
+                order = kvm->arch.hpt.order;
                 /* Set the entire HPT to 0, i.e. invalid HPTEs */
-                memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
+                memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
                 /*
                  * Reset all the reverse-mapping chains for all memslots
                  */
@@ -155,13 +155,13 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
 void kvmppc_free_hpt(struct kvm *kvm)
 {
-        vfree(kvm->arch.revmap);
-        if (kvm->arch.hpt_cma_alloc)
-                kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt_virt),
-                                 1 << (kvm->arch.hpt_order - PAGE_SHIFT));
-        else if (kvm->arch.hpt_virt)
-                free_pages(kvm->arch.hpt_virt,
-                           kvm->arch.hpt_order - PAGE_SHIFT);
+        vfree(kvm->arch.hpt.rev);
+        if (kvm->arch.hpt.cma)
+                kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt.virt),
+                                 1 << (kvm->arch.hpt.order - PAGE_SHIFT));
+        else if (kvm->arch.hpt.virt)
+                free_pages(kvm->arch.hpt.virt,
+                           kvm->arch.hpt.order - PAGE_SHIFT);
 }
 /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
@@ -196,8 +196,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
         if (npages > 1ul << (40 - porder))
                 npages = 1ul << (40 - porder);
         /* Can't use more than 1 HPTE per HPTEG */
-        if (npages > kvm->arch.hpt_mask + 1)
-                npages = kvm->arch.hpt_mask + 1;
+        if (npages > kvm->arch.hpt.mask + 1)
+                npages = kvm->arch.hpt.mask + 1;
         hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
                 HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
@@ -207,7 +207,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
         for (i = 0; i < npages; ++i) {
                 addr = i << porder;
                 /* can't use hpt_hash since va > 64 bits */
-                hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
+                hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt.mask;
                 /*
                  * We assume that the hash table is empty and no
                  * vcpus are using it at this stage.  Since we create
@@ -340,11 +340,11 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                 preempt_enable();
                 return -ENOENT;
         }
-        hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+        hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
         v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
         if (cpu_has_feature(CPU_FTR_ARCH_300))
                 v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
-        gr = kvm->arch.revmap[index].guest_rpte;
+        gr = kvm->arch.hpt.rev[index].guest_rpte;
         unlock_hpte(hptep, orig_v);
         preempt_enable();
@@ -485,8 +485,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 }
         }
         index = vcpu->arch.pgfault_index;
-        hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
-        rev = &kvm->arch.revmap[index];
+        hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
+        rev = &kvm->arch.hpt.rev[index];
         preempt_disable();
         while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                 cpu_relax();
@@ -748,7 +748,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
                            unsigned long gfn)
 {
-        struct revmap_entry *rev = kvm->arch.revmap;
+        struct revmap_entry *rev = kvm->arch.hpt.rev;
         unsigned long h, i, j;
         __be64 *hptep;
         unsigned long ptel, psize, rcbits;
@@ -768,7 +768,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
          * rmap chain lock.
          */
         i = *rmapp & KVMPPC_RMAP_INDEX;
-        hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+        hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
         if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                 /* unlock rmap before spinning on the HPTE lock */
                 unlock_rmap(rmapp);
@@ -860,7 +860,7 @@ void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
                          unsigned long gfn)
 {
-        struct revmap_entry *rev = kvm->arch.revmap;
+        struct revmap_entry *rev = kvm->arch.hpt.rev;
         unsigned long head, i, j;
         __be64 *hptep;
         int ret = 0;
@@ -880,7 +880,7 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
         i = head = *rmapp & KVMPPC_RMAP_INDEX;
         do {
-                hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+                hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
                 j = rev[i].forw;
                 /* If this HPTE isn't referenced, ignore it */
@@ -923,7 +923,7 @@ int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
                               unsigned long gfn)
 {
-        struct revmap_entry *rev = kvm->arch.revmap;
+        struct revmap_entry *rev = kvm->arch.hpt.rev;
         unsigned long head, i, j;
         unsigned long *hp;
         int ret = 1;
@@ -940,7 +940,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
         if (*rmapp & KVMPPC_RMAP_PRESENT) {
                 i = head = *rmapp & KVMPPC_RMAP_INDEX;
                 do {
-                        hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
+                        hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
                         j = rev[i].forw;
                         if (be64_to_cpu(hp[1]) & HPTE_R_R)
                                 goto out;
@@ -980,7 +980,7 @@ static int vcpus_running(struct kvm *kvm)
  */
 static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 {
-        struct revmap_entry *rev = kvm->arch.revmap;
+        struct revmap_entry *rev = kvm->arch.hpt.rev;
         unsigned long head, i, j;
         unsigned long n;
         unsigned long v, r;
@@ -1005,7 +1005,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
         i = head = *rmapp & KVMPPC_RMAP_INDEX;
         do {
                 unsigned long hptep1;
-                hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+                hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
                 j = rev[i].forw;
                 /*
@@ -1311,8 +1311,8 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
         flags = ctx->flags;
         i = ctx->index;
-        hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
-        revp = kvm->arch.revmap + i;
+        hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
+        revp = kvm->arch.hpt.rev + i;
         lbuf = (unsigned long __user *)buf;
         nb = 0;
@@ -1327,7 +1327,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
                 /* Skip uninteresting entries, i.e. clean on not-first pass */
                 if (!first_pass) {
-                        while (i < kvm->arch.hpt_npte &&
+                        while (i < kvm->arch.hpt.npte &&
                                !hpte_dirty(revp, hptp)) {
                                 ++i;
                                 hptp += 2;
@@ -1337,7 +1337,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
                 hdr.index = i;
                 /* Grab a series of valid entries */
-                while (i < kvm->arch.hpt_npte &&
+                while (i < kvm->arch.hpt.npte &&
                        hdr.n_valid < 0xffff &&
                        nb + HPTE_SIZE < count &&
                        record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
@@ -1353,7 +1353,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
                         ++revp;
                 }
                 /* Now skip invalid entries while we can */
-                while (i < kvm->arch.hpt_npte &&
+                while (i < kvm->arch.hpt.npte &&
                        hdr.n_invalid < 0xffff &&
                        record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
                         /* found an invalid entry */
@@ -1374,7 +1374,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
                 }
                 /* Check if we've wrapped around the hash table */
-                if (i >= kvm->arch.hpt_npte) {
+                if (i >= kvm->arch.hpt.npte) {
                         i = 0;
                         ctx->first_pass = 0;
                         break;
@@ -1433,11 +1433,11 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                 err = -EINVAL;
                 i = hdr.index;
-                if (i >= kvm->arch.hpt_npte ||
-                    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
+                if (i >= kvm->arch.hpt.npte ||
+                    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt.npte)
                         break;
-                hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+                hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
                 lbuf = (unsigned long __user *)buf;
                 for (j = 0; j < hdr.n_valid; ++j) {
                         __be64 hpte_v;
@@ -1624,8 +1624,8 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
         kvm = p->kvm;
         i = p->hpt_index;
-        hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
-        for (; len != 0 && i < kvm->arch.hpt_npte; ++i, hptp += 2) {
+        hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
+        for (; len != 0 && i < kvm->arch.hpt.npte; ++i, hptp += 2) {
                 if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
                         continue;
@@ -1635,7 +1635,7 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
                         cpu_relax();
                 v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
                 hr = be64_to_cpu(hptp[1]);
-                gr = kvm->arch.revmap[i].guest_rpte;
+                gr = kvm->arch.hpt.rev[i].guest_rpte;
                 unlock_hpte(hptp, v);
                 preempt_enable();
...
@@ -3197,7 +3197,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
                 goto out;       /* another vcpu beat us to it */
         /* Allocate hashed page table (if not done already) and reset it */
-        if (!kvm->arch.hpt_virt) {
+        if (!kvm->arch.hpt.virt) {
                 err = kvmppc_alloc_hpt(kvm, NULL);
                 if (err) {
                         pr_err("KVM: Couldn't alloc HPT\n");
...
@@ -86,10 +86,10 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
         if (*rmap & KVMPPC_RMAP_PRESENT) {
                 i = *rmap & KVMPPC_RMAP_INDEX;
-                head = &kvm->arch.revmap[i];
+                head = &kvm->arch.hpt.rev[i];
                 if (realmode)
                         head = real_vmalloc_addr(head);
-                tail = &kvm->arch.revmap[head->back];
+                tail = &kvm->arch.hpt.rev[head->back];
                 if (realmode)
                         tail = real_vmalloc_addr(tail);
                 rev->forw = i;
@@ -154,8 +154,8 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
         lock_rmap(rmap);
         head = *rmap & KVMPPC_RMAP_INDEX;
-        next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
-        prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
+        next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
+        prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
         next->back = rev->back;
         prev->forw = rev->forw;
         if (head == pte_index) {
@@ -292,11 +292,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
         /* Find and lock the HPTEG slot to use */
  do_insert:
-        if (pte_index >= kvm->arch.hpt_npte)
+        if (pte_index >= kvm->arch.hpt.npte)
                 return H_PARAMETER;
         if (likely((flags & H_EXACT) == 0)) {
                 pte_index &= ~7UL;
-                hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+                hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                 for (i = 0; i < 8; ++i) {
                         if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
                             try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
@@ -327,7 +327,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                 }
                 pte_index += i;
         } else {
-                hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+                hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                 if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                    HPTE_V_ABSENT)) {
                         /* Lock the slot and check again */
@@ -344,7 +344,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
         }
         /* Save away the guest's idea of the second HPTE dword */
-        rev = &kvm->arch.revmap[pte_index];
+        rev = &kvm->arch.hpt.rev[pte_index];
         if (realmode)
                 rev = real_vmalloc_addr(rev);
         if (rev) {
@@ -469,9 +469,9 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
         if (kvm_is_radix(kvm))
                 return H_FUNCTION;
-        if (pte_index >= kvm->arch.hpt_npte)
+        if (pte_index >= kvm->arch.hpt.npte)
                 return H_PARAMETER;
-        hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+        hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
         while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                 cpu_relax();
         pte = orig_pte = be64_to_cpu(hpte[0]);
@@ -487,7 +487,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                 return H_NOT_FOUND;
         }
-        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
         v = pte & ~HPTE_V_HVLOCK;
         if (v & HPTE_V_VALID) {
                 hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
@@ -557,13 +557,13 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
                         break;
                 }
                 if (req != 1 || flags == 3 ||
-                    pte_index >= kvm->arch.hpt_npte) {
+                    pte_index >= kvm->arch.hpt.npte) {
                         /* parameter error */
                         args[j] = ((0xa0 | flags) << 56) + pte_index;
                         ret = H_PARAMETER;
                         break;
                 }
-                hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
+                hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
                 /* to avoid deadlock, don't spin except for first */
                 if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                         if (n)
@@ -600,7 +600,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
                 }
                 args[j] = ((0x80 | flags) << 56) + pte_index;
-                rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+                rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
                 note_hpte_modification(kvm, rev);
                 if (!(hp0 & HPTE_V_VALID)) {
@@ -657,10 +657,10 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
         if (kvm_is_radix(kvm))
                 return H_FUNCTION;
-        if (pte_index >= kvm->arch.hpt_npte)
+        if (pte_index >= kvm->arch.hpt.npte)
                 return H_PARAMETER;
-        hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+        hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
         while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                 cpu_relax();
         v = pte_v = be64_to_cpu(hpte[0]);
@@ -680,7 +680,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                 /* Update guest view of 2nd HPTE dword */
                 mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                         HPTE_R_KEY_HI | HPTE_R_KEY_LO;
-                rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+                rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
                 if (rev) {
                         r = (rev->guest_rpte & ~mask) | bits;
                         rev->guest_rpte = r;
@@ -728,15 +728,15 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
         if (kvm_is_radix(kvm))
                 return H_FUNCTION;
-        if (pte_index >= kvm->arch.hpt_npte)
+        if (pte_index >= kvm->arch.hpt.npte)
                 return H_PARAMETER;
         if (flags & H_READ_4) {
                 pte_index &= ~3;
                 n = 4;
         }
-        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
         for (i = 0; i < n; ++i, ++pte_index) {
-                hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+                hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                 v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
                 r = be64_to_cpu(hpte[1]);
                 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
@@ -769,11 +769,11 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
         if (kvm_is_radix(kvm))
                 return H_FUNCTION;
-        if (pte_index >= kvm->arch.hpt_npte)
+        if (pte_index >= kvm->arch.hpt.npte)
                 return H_PARAMETER;
-        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
-        hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
+        hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
         while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                 cpu_relax();
         v = be64_to_cpu(hpte[0]);
@@ -817,11 +817,11 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
         if (kvm_is_radix(kvm))
                 return H_FUNCTION;
-        if (pte_index >= kvm->arch.hpt_npte)
+        if (pte_index >= kvm->arch.hpt.npte)
                 return H_PARAMETER;
-        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
-        hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
+        hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
         while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                 cpu_relax();
         v = be64_to_cpu(hpte[0]);
@@ -970,7 +970,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                 somask = (1UL << 28) - 1;
                 vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
         }
-        hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
+        hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt.mask;
         avpn = slb_v & ~(somask >> 16); /* also includes B */
         avpn |= (eaddr & somask) >> 16;
@@ -981,7 +981,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
         val |= avpn;
         for (;;) {
-                hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));
+                hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));
                 for (i = 0; i < 16; i += 2) {
                         /* Read the PTE racily */
@@ -1017,7 +1017,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                 if (val & HPTE_V_SECONDARY)
                         break;
                 val |= HPTE_V_SECONDARY;
-                hash = hash ^ kvm->arch.hpt_mask;
+                hash = hash ^ kvm->arch.hpt.mask;
         }
         return -1;
 }
@@ -1066,14 +1066,14 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                         return status;  /* there really was no HPTE */
                 return 0;               /* for prot fault, HPTE disappeared */
         }
-        hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+        hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
         v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
         r = be64_to_cpu(hpte[1]);
         if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                 v = hpte_new_to_old_v(v, r);
                 r = hpte_new_to_old_r(r);
         }
-        rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
+        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
         gr = rev->guest_rpte;
         unlock_hpte(hpte, orig_v);
...