Commit 639e4597 authored by David Gibson, committed by Paul Mackerras

KVM: PPC: Book3S HV: Create kvmppc_unmap_hpte_helper()

The kvm_unmap_rmapp() function, called from certain MMU notifiers, is used
to force all guest mappings of a particular host page to be set ABSENT, and
removed from the reverse mappings.

For HPT resizing, we will have some cases where we want to set just a
single guest HPTE ABSENT and remove its reverse mappings.  To prepare for
this, we split out the logic from kvm_unmap_rmapp() to evict a single HPTE,
moving it to a new helper function.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent f98a8bf9
...@@ -742,37 +742,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, ...@@ -742,37 +742,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
return kvm_handle_hva_range(kvm, hva, hva + 1, handler); return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
} }
static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, /* Must be called with both HPTE and rmap locked */
unsigned long gfn) static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
unsigned long *rmapp, unsigned long gfn)
{ {
__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
struct revmap_entry *rev = kvm->arch.hpt.rev; struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long h, i, j; unsigned long j, h;
__be64 *hptep;
unsigned long ptel, psize, rcbits; unsigned long ptel, psize, rcbits;
unsigned long *rmapp;
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
for (;;) {
lock_rmap(rmapp);
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
break;
}
/*
* To avoid an ABBA deadlock with the HPTE lock bit,
* we can't spin on the HPTE lock while holding the
* rmap chain lock.
*/
i = *rmapp & KVMPPC_RMAP_INDEX;
hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
/* unlock rmap before spinning on the HPTE lock */
unlock_rmap(rmapp);
while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
cpu_relax();
continue;
}
j = rev[i].forw; j = rev[i].forw;
if (j == i) { if (j == i) {
/* chain is now empty */ /* chain is now empty */
...@@ -804,6 +782,39 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, ...@@ -804,6 +782,39 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
note_hpte_modification(kvm, &rev[i]); note_hpte_modification(kvm, &rev[i]);
} }
} }
}
static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
unsigned long i;
__be64 *hptep;
unsigned long *rmapp;
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
for (;;) {
lock_rmap(rmapp);
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
break;
}
/*
* To avoid an ABBA deadlock with the HPTE lock bit,
* we can't spin on the HPTE lock while holding the
* rmap chain lock.
*/
i = *rmapp & KVMPPC_RMAP_INDEX;
hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
/* unlock rmap before spinning on the HPTE lock */
unlock_rmap(rmapp);
while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
cpu_relax();
continue;
}
kvmppc_unmap_hpte(kvm, i, rmapp, gfn);
unlock_rmap(rmapp); unlock_rmap(rmapp);
__unlock_hpte(hptep, be64_to_cpu(hptep[0])); __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment