Commit 1395375c authored by Michael Ellerman

Merge branch 'topic/ppc-kvm' into next

Merge one more commit from the topic branch we shared with the kvm-ppc
tree.

This brings in a fix to the code that scans for dirty pages during
migration of a VM, which was incorrectly triggering a warning.
parents 4336b933 bf8036a4
arch/powerpc/include/asm/kvm_book3s_64.h
@@ -635,6 +635,16 @@ extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
 					 unsigned long gpa, unsigned long hpa,
 					 unsigned long nbytes);
 
+static inline pte_t *
+find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
+				unsigned *hshift)
+{
+	pte_t *pte;
+
+	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+	return pte;
+}
+
 static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
 					    unsigned *hshift)
 {
...
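For context, the new helper is a deliberately lock-free twin of the existing find_kvm_secondary_pte(). The locked variant in mainline looks roughly like the sketch below (reproduced here for illustration only, it is not part of this diff); its VM_WARN() firing when the dirty-page scan walked the table without kvm->mmu_lock held is the spurious warning this merge fixes:

static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
					    unsigned *hshift)
{
	pte_t *pte;

	/* Warn if the caller does not hold the kvm mmu_lock. */
	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held \n", __func__);
	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);

	return pte;
}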
arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -1040,7 +1040,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
 {
 	unsigned long gfn = memslot->base_gfn + pagenum;
 	unsigned long gpa = gfn << PAGE_SHIFT;
-	pte_t *ptep;
+	pte_t *ptep, pte;
 	unsigned int shift;
 	int ret = 0;
 	unsigned long old, *rmapp;
@@ -1048,12 +1048,35 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return ret;
 
-	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
-	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
-		ret = 1;
-		if (shift)
-			ret = 1 << (shift - PAGE_SHIFT);
+	/*
+	 * For performance reasons we don't hold kvm->mmu_lock while walking the
+	 * partition scoped table.
+	 */
+	ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
+	if (!ptep)
+		return 0;
+
+	pte = READ_ONCE(*ptep);
+	if (pte_present(pte) && pte_dirty(pte)) {
 		spin_lock(&kvm->mmu_lock);
+		/*
+		 * Recheck the pte again
+		 */
+		if (pte_val(pte) != pte_val(*ptep)) {
+			/*
+			 * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
+			 * only find PAGE_SIZE pte entries here. We can continue
+			 * to use the pte addr returned by above page table
+			 * walk.
+			 */
+			if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
+				spin_unlock(&kvm->mmu_lock);
+				return 0;
+			}
+		}
+
+		ret = 1;
+		VM_BUG_ON(shift);
 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
 					      gpa, shift);
 		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
...
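The rewritten check in kvm_radix_test_clear_dirty() is an instance of a common pattern: snapshot a shared entry without the lock, take the lock only when the fast path says there is work to do, then recheck under the lock because the entry may have changed in between. A minimal, self-contained user-space sketch of that pattern (illustrative names only, not kernel code; assumes C11 atomics and pthreads):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DIRTY_BIT 0x1UL

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic unsigned long entry = DIRTY_BIT;	/* stands in for a pte */

/* Test-and-clear the dirty bit, taking the lock only when needed. */
static bool test_clear_dirty(void)
{
	unsigned long snap = atomic_load(&entry);	/* lockless snapshot */

	if (!(snap & DIRTY_BIT))
		return false;	/* fast path: clean, no lock taken */

	pthread_mutex_lock(&table_lock);
	/* Recheck: the entry may have changed since the snapshot. */
	if (!(atomic_load(&entry) & DIRTY_BIT)) {
		pthread_mutex_unlock(&table_lock);
		return false;
	}
	atomic_fetch_and(&entry, ~DIRTY_BIT);	/* clear under the lock */
	pthread_mutex_unlock(&table_lock);
	return true;
}

int main(void)
{
	printf("first call:  %d\n", test_clear_dirty());	/* 1: was dirty */
	printf("second call: %d\n", test_clear_dirty());	/* 0: now clean */
	return 0;
}

Taking the lock only on the dirty path keeps the common nothing-to-do case cheap, which matters here because this check runs once per page of a memslot during migration.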