Commit a4bd6eb0 authored by Aneesh Kumar K.V, committed by Alexander Graf

KVM: PPC: Book3S HV: Add helpers for lock/unlock hpte

This adds helper routines for locking and unlocking HPTEs, and uses
them in the rest of the code.  We don't change any locking rules in
this patch.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 31037eca
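Before the diff, a minimal, illustrative sketch of the locking pattern these helpers consolidate. Only try_lock_hpte(), unlock_hpte(), __unlock_hpte() and HPTE_V_HVLOCK come from this patch (see the first hunk below); example_touch_hpte() is a hypothetical caller added here for illustration, not code from the kernel tree.

/*
 * Illustrative sketch only (not part of this patch): the pattern that the
 * new helpers replace at each call site.  example_touch_hpte() is a
 * hypothetical caller; hptep points at the first doubleword of an HPTE.
 */
static void example_touch_hpte(__be64 *hptep)
{
	unsigned long v;

	/* Take the software lock bit in HPTE dword 0 */
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();

	/* Read dword 0 with the lock bit masked off */
	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;

	/* ... inspect or modify the entry while it is locked ... */

	/*
	 * unlock_hpte(): release barrier, then rewrite dword 0 with
	 * HPTE_V_HVLOCK cleared.  __unlock_hpte() does the same store
	 * without the barrier, for callers that order it themselves
	 * (e.g. with eieio()/ptesync).
	 */
	unlock_hpte(hptep, v);
}

The call sites in the diff below simply replace the open-coded barrier plus clearing of HPTE_V_HVLOCK with one of these two helpers.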
@@ -85,6 +85,20 @@ static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
 	return old == 0;
 }
 
+static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+	hpte_v &= ~HPTE_V_HVLOCK;
+	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+	hpte[0] = cpu_to_be64(hpte_v);
+}
+
+/* Without barrier */
+static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+	hpte_v &= ~HPTE_V_HVLOCK;
+	hpte[0] = cpu_to_be64(hpte_v);
+}
+
 static inline int __hpte_actual_psize(unsigned int lp, int psize)
 {
 	int i, shift;
@@ -338,9 +338,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
 
-	/* Unlock the HPTE */
-	asm volatile("lwsync" : : : "memory");
-	hptep[0] = cpu_to_be64(v);
+	unlock_hpte(hptep, v);
 	preempt_enable();
 
 	gpte->eaddr = eaddr;
@@ -469,8 +467,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 	hpte[1] = be64_to_cpu(hptep[1]);
 	hpte[2] = r = rev->guest_rpte;
-	asm volatile("lwsync" : : : "memory");
-	hptep[0] = cpu_to_be64(hpte[0]);
+	unlock_hpte(hptep, hpte[0]);
 	preempt_enable();
 
 	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
@@ -621,7 +618,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	hptep[1] = cpu_to_be64(r);
 	eieio();
-	hptep[0] = cpu_to_be64(hpte[0]);
+	__unlock_hpte(hptep, hpte[0]);
 	asm volatile("ptesync" : : : "memory");
 	preempt_enable();
 	if (page && hpte_is_writable(r))
@@ -642,7 +639,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return ret;
 
  out_unlock:
-	hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	preempt_enable();
 	goto out_put;
 }
@@ -771,7 +768,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			}
 		}
 		unlock_rmap(rmapp);
-		hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	}
 	return 0;
 }
@@ -857,7 +854,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			}
 			ret = 1;
 		}
-		hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	} while ((i = j) != head);
 
 	unlock_rmap(rmapp);
@@ -974,8 +971,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 
 		/* Now check and modify the HPTE */
 		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
-			/* unlock and continue */
-			hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+			__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 			continue;
 		}
 
@@ -996,9 +992,9 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 				npages_dirty = n;
 			eieio();
 		}
-		v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
+		v &= ~HPTE_V_ABSENT;
 		v |= HPTE_V_VALID;
-		hptep[0] = cpu_to_be64(v);
+		__unlock_hpte(hptep, v);
 	} while ((i = j) != head);
 
 	unlock_rmap(rmapp);
@@ -1218,8 +1214,7 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
 			r &= ~HPTE_GR_MODIFIED;
 			revp->guest_rpte = r;
 		}
-		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
-		hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		unlock_hpte(hptp, be64_to_cpu(hptp[0]));
 		preempt_enable();
 		if (!(valid == want_valid && (first_pass || dirty)))
 			ok = 0;
@@ -150,12 +150,6 @@ static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
 	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
 }
 
-static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
-{
-	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
-	hpte[0] = cpu_to_be64(hpte_v);
-}
-
 long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		       long pte_index, unsigned long pteh, unsigned long ptel,
 		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
@@ -271,10 +265,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 				u64 pte;
 				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 					cpu_relax();
-				pte = be64_to_cpu(*hpte);
+				pte = be64_to_cpu(hpte[0]);
 				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
 					break;
-				*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
+				__unlock_hpte(hpte, pte);
 				hpte += 2;
 			}
 			if (i == 8)
@@ -290,9 +284,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 				cpu_relax();
-			pte = be64_to_cpu(*hpte);
+			pte = be64_to_cpu(hpte[0]);
 			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
-				*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
+				__unlock_hpte(hpte, pte);
 				return H_PTEG_FULL;
 			}
 		}
@@ -331,7 +325,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
 	eieio();
-	hpte[0] = cpu_to_be64(pteh);
+	__unlock_hpte(hpte, pteh);
 	asm volatile("ptesync" : : : "memory");
 
 	*pte_idx_ret = pte_index;
@@ -412,7 +406,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
 	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
 	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
-		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hpte, pte);
 		return H_NOT_FOUND;
 	}
 
@@ -548,7 +542,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
 			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
 			args[j] |= rcbits << (56 - 5);
-			hp[0] = 0;
+			__unlock_hpte(hp, 0);
 		}
 	}
 
@@ -574,7 +568,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	pte = be64_to_cpu(hpte[0]);
 	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
 	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
-		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hpte, pte);
 		return H_NOT_FOUND;
 	}
 
@@ -755,8 +749,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			/* Return with the HPTE still locked */
 			return (hash << 3) + (i >> 1);
 
-		/* Unlock and move on */
-		hpte[i] = cpu_to_be64(v);
+		__unlock_hpte(&hpte[i], v);
 	}
 
 	if (val & HPTE_V_SECONDARY)