Commit e3d8ed55 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/kvm/book3s: Use find_kvm_host_pte in h_enter

Since kvmppc_do_h_enter() can be called in real mode, use the low-level
arch_spin_lock(), which is safe to call from real mode.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-15-aneesh.kumar@linux.ibm.com
parent 9781e759
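
Why arch_spin_lock() is the right tool here: spin_lock() on a kernel
spinlock_t may also run lockdep and tracing instrumentation, and that code
cannot execute safely in real mode (MMU translation off), whereas
arch_spin_lock() spins on the bare arch-level lock word and touches nothing
else. The following is an illustrative userspace sketch of that layering,
not the kernel implementation; in the real kernel, spinlock_t nests a
raw_spinlock (rlock) around the arch lock, which is why the patch below
spells the lock as kvm->mmu_lock.rlock.raw_lock.

#include <stdatomic.h>
#include <stdio.h>

/* Model of the arch-level lock: a bare test-and-set on one word. */
typedef struct {
	atomic_flag locked;
} arch_spinlock_t;

static void arch_spin_lock(arch_spinlock_t *lock)
{
	/* Pure spin on the lock word; no lockdep, no tracepoints. */
	while (atomic_flag_test_and_set_explicit(&lock->locked,
						 memory_order_acquire))
		; /* spin */
}

static void arch_spin_unlock(arch_spinlock_t *lock)
{
	atomic_flag_clear_explicit(&lock->locked, memory_order_release);
}

/* Model of spinlock_t: a wrapper whose lock/unlock paths also run
 * instrumentation (stubbed here as printf), which is what makes the
 * full API unusable from real mode. */
typedef struct {
	arch_spinlock_t raw_lock;
} spinlock_t;

static void spin_lock(spinlock_t *lock)
{
	printf("lockdep/tracing hook runs here (unsafe in real mode)\n");
	arch_spin_lock(&lock->raw_lock);
}

static void spin_unlock(spinlock_t *lock)
{
	arch_spin_unlock(&lock->raw_lock);
	printf("lockdep/tracing hook runs here (unsafe in real mode)\n");
}

int main(void)
{
	spinlock_t mmu_lock = { .raw_lock = { .locked = ATOMIC_FLAG_INIT } };

	/* Virtual-mode caller: the full API is fine. */
	spin_lock(&mmu_lock);
	spin_unlock(&mmu_lock);

	/* Real-mode caller: reach through to the raw lock word, as the
	 * patch does with kvm->mmu_lock.rlock.raw_lock. */
	arch_spin_lock(&mmu_lock.raw_lock);
	arch_spin_unlock(&mmu_lock.raw_lock);
	return 0;
}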
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -281,11 +281,10 @@ static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 {
 	long ret;
 
-	/* Protect linux PTE lookup from page table destruction */
-	rcu_read_lock_sched();	/* this disables preemption too */
+	preempt_disable();
 	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
 				kvm->mm->pgd, false, pte_idx_ret);
-	rcu_read_unlock_sched();
+	preempt_enable();
 	if (ret == H_TOO_HARD) {
 		/* this can't happen */
 		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -210,7 +210,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	pte_t *ptep;
 	unsigned int writing;
 	unsigned long mmu_seq;
-	unsigned long rcbits, irq_flags = 0;
+	unsigned long rcbits;
 
 	if (kvm_is_radix(kvm))
 		return H_FUNCTION;
@@ -248,17 +248,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 	/* Translate to host virtual address */
 	hva = __gfn_to_hva_memslot(memslot, gfn);
-	/*
-	 * If we had a page table table change after lookup, we would
-	 * retry via mmu_notifier_retry.
-	 */
-	if (!realmode)
-		local_irq_save(irq_flags);
-	/*
-	 * If called in real mode we have MSR_EE = 0. Otherwise
-	 * we disable irq above.
-	 */
-	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
+
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
 	if (ptep) {
 		pte_t pte;
 		unsigned int host_pte_size;
@@ -272,8 +264,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		 * to <= host page size, if host is using hugepage
 		 */
 		if (host_pte_size < psize) {
-			if (!realmode)
-				local_irq_restore(flags);
+			arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 			return H_PARAMETER;
 		}
 		pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -287,8 +278,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			pa |= gpa & ~PAGE_MASK;
 		}
 	}
-	if (!realmode)
-		local_irq_restore(irq_flags);
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 
 	ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
 	ptel |= pa;