Commit 6cdf3037 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/kvm/book3s: Use kvm helpers to walk shadow or secondary table

Update kvmppc_hv_handle_set_rc() to use find_kvm_nested_guest_pte() and
find_kvm_secondary_pte() instead of walking the partition-scoped table
with __find_linux_pte() directly. Callers now pass a "nested" flag rather
than a page-table pointer, and the function picks the right table to walk.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-12-aneesh.kumar@linux.ibm.com
parent dc891849
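
In short: the pgd_t pointer argument is replaced by a bool, and kvmppc_hv_handle_set_rc() itself chooses which partition-scoped table to walk. A minimal sketch of the two resulting call patterns, paraphrased from the hunks below ("handled" is a hypothetical local; the other names are from the kernel sources):

	/* L1 guest fault: walk our own (secondary) partition-scoped table */
	handled = kvmppc_hv_handle_set_rc(kvm, false, writing,
					  gpa, kvm->arch.lpid);

	/* L2 (nested) guest fault: walk the shadow table kept for that guest */
	handled = kvmppc_hv_handle_set_rc(kvm, true, writing,
					  n_gpa, gp->shadow_lpid);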
@@ -198,7 +198,7 @@ extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
 			unsigned int shift,
 			const struct kvm_memory_slot *memslot,
 			unsigned int lpid);
-extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
+extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
 				    bool writing, unsigned long gpa,
 				    unsigned int lpid);
 extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
...
@@ -647,6 +647,9 @@ static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
 	return pte;
 }
 
+extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
+					unsigned long ea, unsigned *hshift);
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
...
@@ -735,7 +735,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 	return ret;
 }
 
-bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
+bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
 			     unsigned long gpa, unsigned int lpid)
 {
 	unsigned long pgflags;
@@ -750,12 +750,12 @@ bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
 	pgflags = _PAGE_ACCESSED;
 	if (writing)
 		pgflags |= _PAGE_DIRTY;
-	/*
-	 * We are walking the secondary (partition-scoped) page table here.
-	 * We can do this without disabling irq because the Linux MM
-	 * subsystem doesn't do THP splits and collapses on this tree.
-	 */
-	ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
+
+	if (nested)
+		ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
+	else
+		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+
 	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
 		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
 		return true;
@@ -949,8 +949,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	/* Failed to set the reference/change bits */
 	if (dsisr & DSISR_SET_RC) {
 		spin_lock(&kvm->mmu_lock);
-		if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
-					    writing, gpa, kvm->arch.lpid))
+		if (kvmppc_hv_handle_set_rc(kvm, false, writing,
+					    gpa, kvm->arch.lpid))
 			dsisr &= ~DSISR_SET_RC;
 		spin_unlock(&kvm->mmu_lock);
...
@@ -750,7 +750,7 @@ static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
 	return kvm->arch.nested_guests[lpid];
 }
 
-static pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
-					unsigned long ea, unsigned *hshift)
+pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
+				 unsigned long ea, unsigned *hshift)
 {
 	struct kvm_nested_guest *gp;
@@ -767,7 +767,6 @@ static pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
 	return pte;
 }
 
-
 static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
 {
 	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
@@ -1226,7 +1225,7 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
 	spin_lock(&kvm->mmu_lock);
 	/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
-	ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing,
+	ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
 				      gpte.raddr, kvm->arch.lpid);
 	if (!ret) {
 		ret = -EINVAL;
@@ -1234,8 +1233,8 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
 	}
 
 	/* Set the rc bit in the pte of the shadow_pgtable for the nest guest */
-	ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa,
-				      gp->shadow_lpid);
+	ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
+				      n_gpa, gp->shadow_lpid);
 	if (!ret)
 		ret = -EINVAL;
 	else
...