Commit 1066f772 authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S HV: Put huge-page HPTEs in rmap chain for base address

Currently, when a huge page is faulted in for a guest, we select the
rmap chain to insert the HPTE into based on the guest physical address
that the guest tried to access.  Since there is an rmap chain for each
system page, there are many rmap chains for the area covered by a huge
page (e.g. 256 for 16MB pages when PAGE_SIZE = 64kB), and the huge-page
HPTE could end up in any one of them.

For consistency, and to make the huge-page HPTEs easier to find, we now
put huge-page HPTEs in the rmap chain corresponding to the base address
of the huge page.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 55765483
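
To make the numbers in the commit message concrete, here is a minimal user-space sketch of the arithmetic involved. It is not part of the patch: the 64kB PAGE_SIZE, 16MB huge page, example faulting address and memslot base_gfn of 0 are assumed values, and gpa_base/gfn_base simply mirror the names introduced by the diff below.

/*
 * Minimal user-space sketch (not kernel code) of the arithmetic above,
 * assuming 64kB system pages, a 16MB huge page, an example faulting
 * address, and a memslot whose base_gfn is 0.
 */
#include <stdio.h>

#define PAGE_SHIFT	16			/* 64kB system pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long psize = 16UL << 20;	/* 16MB huge page */
	unsigned long gpa = 0x1234000UL;	/* example faulting guest physical address */
	unsigned long base_gfn = 0;		/* assumed memslot base */

	/* One rmap chain per system page, so 256 chains span one 16MB page */
	printf("rmap chains per huge page: %lu\n", psize / PAGE_SIZE);

	/* Before the patch: index derived from the faulting address */
	unsigned long gfn = gpa >> PAGE_SHIFT;
	printf("rmap index from faulting address: %lu\n", gfn - base_gfn);

	/* After the patch: index derived from the huge page's base address */
	unsigned long gpa_base = gpa & ~(psize - 1);
	unsigned long gfn_base = gpa_base >> PAGE_SHIFT;
	printf("rmap index from huge-page base:   %lu\n", gfn_base - base_gfn);

	return 0;
}

With these assumed values the sketch reports 256 rmap chains per huge page, index 291 under the old scheme and index 256 (the base of the huge page) under the new one.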
@@ -585,6 +585,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long *hptep, hpte[3], r;
 	unsigned long mmu_seq, psize, pte_size;
+	unsigned long gpa_base, gfn_base;
 	unsigned long gpa, gfn, hva, pfn;
 	struct kvm_memory_slot *memslot;
 	unsigned long *rmap;
@@ -623,7 +624,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	/* Translate the logical address and get the page */
 	psize = hpte_page_size(hpte[0], r);
-	gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1));
+	gpa_base = r & HPTE_R_RPN & ~(psize - 1);
+	gfn_base = gpa_base >> PAGE_SHIFT;
+	gpa = gpa_base | (ea & (psize - 1));
 	gfn = gpa >> PAGE_SHIFT;
 	memslot = gfn_to_memslot(kvm, gfn);
 
@@ -635,6 +638,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	if (!kvm->arch.using_mmu_notifiers)
 		return -EFAULT;		/* should never get here */
 
+	/*
+	 * This should never happen, because of the slot_is_aligned()
+	 * check in kvmppc_do_h_enter().
+	 */
+	if (gfn_base < memslot->base_gfn)
+		return -EFAULT;
+
 	/* used to check for invalidations in progress */
 	mmu_seq = kvm->mmu_notifier_seq;
 	smp_rmb();
@@ -727,7 +737,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		goto out_unlock;
 	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
 
-	rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+	/* Always put the HPTE in the rmap chain for the page base address */
+	rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
 	lock_rmap(rmap);
 
 	/* Check if we might have been invalidated; let the guest retry if so */