Commit dc891849 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/kvm/nested: Add helper to walk nested shadow linux page table.

The locking rules for walking a nested shadow Linux page table differ from
those for a process-scoped table. Hence add a helper for the nested page
table walk, and also check that we are holding the right lock.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-11-aneesh.kumar@linux.ibm.com
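
For context, a minimal sketch of the calling convention the new helper enforces, in kernel-style C. The wrapper below (nested_gpa_is_mapped) is hypothetical and not part of this patch; it only illustrates that the result of the walk is stable only while kvm->mmu_lock is held, which is what the helper's VM_WARN() check asserts:

/*
 * Hypothetical caller, for illustration only. The pte returned by
 * find_kvm_nested_guest_pte() is only stable while kvm->mmu_lock is
 * held; the helper warns (under CONFIG_DEBUG_VM) if it is called
 * without the lock.
 */
static bool nested_gpa_is_mapped(struct kvm *kvm, unsigned long lpid,
				 unsigned long gpa)
{
	unsigned int shift;
	pte_t *ptep;
	bool mapped;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	mapped = ptep && pte_present(*ptep);
	spin_unlock(&kvm->mmu_lock);

	return mapped;
}
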
parent 4b99412e
@@ -750,6 +750,24 @@ static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
 	return kvm->arch.nested_guests[lpid];
 }
 
+static pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
+					unsigned long ea, unsigned *hshift)
+{
+	struct kvm_nested_guest *gp;
+	pte_t *pte;
+
+	gp = kvmhv_find_nested(kvm, lpid);
+	if (!gp)
+		return NULL;
+
+	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+		"%s called with kvm mmu_lock not held \n", __func__);
+	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
+
+	return pte;
+}
+
 static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
 {
 	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
@@ -792,19 +810,15 @@ static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
 				      unsigned long clr, unsigned long set,
 				      unsigned long hpa, unsigned long mask)
 {
-	struct kvm_nested_guest *gp;
 	unsigned long gpa;
 	unsigned int shift, lpid;
 	pte_t *ptep;
 
 	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
 	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
-	gp = kvmhv_find_nested(kvm, lpid);
-	if (!gp)
-		return;
 
 	/* Find the pte */
-	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
 	/*
 	 * If the pte is present and the pfn is still the same, update the pte.
 	 * If the pfn has changed then this is a stale rmap entry, the nested
@@ -854,7 +868,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
 		return;
 
 	/* Find and invalidate the pte */
-	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
 	/* Don't spuriously invalidate ptes if the pfn has changed */
 	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
@@ -921,7 +935,7 @@ static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
 	int shift;
 
 	spin_lock(&kvm->mmu_lock);
-	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+	ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
 	if (!shift)
 		shift = PAGE_SHIFT;
 	if (ptep && pte_present(*ptep)) {
...
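
The rmap update and removal paths keep the same stale-entry guard around the new helper: the pte is only touched if it is present and still maps the expected host address, since a changed pfn means the rmap entry is stale. A condensed sketch of that guard (the wrapper name is hypothetical; the expression is taken verbatim from the hunks above):

/*
 * Hypothetical condensation of the guard used above: ignore ptes
 * whose masked value no longer matches the expected host address,
 * i.e. stale nested-rmap entries.
 */
static bool nest_pte_still_maps_hpa(pte_t *ptep, unsigned long hpa,
				    unsigned long mask)
{
	return ptep && pte_present(*ptep) &&
	       ((pte_val(*ptep) & mask) == hpa);
}
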