Commit 8ca40a70 authored by Christoffer Dall, committed by Avi Kivity

KVM: Take kvm instead of vcpu to mmu_notifier_retry

mmu_notifier_retry() is not specific to any vcpu (and never will be),
so take only a struct kvm as its parameter.

The motivation is the ARM mmu code, which needs to call this from a
point where the vcpu pointer has long since been let go.
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 1f5b77f5
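
For context, the calling pattern this change enables looks roughly like the
sketch below. This is a minimal illustration assuming the post-patch
signature: map_guest_page() and its error handling are hypothetical, while
gfn_to_pfn(), mmu_lock, mmu_notifier_seq, and mmu_notifier_retry() are the
real KVM names.

    /*
     * Hypothetical fault-path helper: note it only needs a struct kvm,
     * not a struct kvm_vcpu -- the point of this patch.
     */
    static int map_guest_page(struct kvm *kvm, gfn_t gfn)
    {
            unsigned long mmu_seq;
            pfn_t pfn;

            /* Snapshot the notifier sequence before looking up the page. */
            mmu_seq = kvm->mmu_notifier_seq;
            smp_rmb();

            pfn = gfn_to_pfn(kvm, gfn);   /* may sleep; error handling elided */

            spin_lock(&kvm->mmu_lock);
            if (mmu_notifier_retry(kvm, mmu_seq)) {
                    /* An invalidation raced with us; retry the fault. */
                    spin_unlock(&kvm->mmu_lock);
                    return -EAGAIN;
            }
            /* ... install the mapping under mmu_lock ... */
            spin_unlock(&kvm->mmu_lock);
            return 0;
    }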
@@ -710,7 +710,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	/* Check if we might have been invalidated; let the guest retry if so */
 	ret = RESUME_GUEST;
-	if (mmu_notifier_retry(vcpu, mmu_seq)) {
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
 		unlock_rmap(rmap);
 		goto out_unlock;
 	}
@@ -297,7 +297,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	lock_rmap(rmap);
 	/* Check for pending invalidations under the rmap chain lock */
 	if (kvm->arch.using_mmu_notifiers &&
-	    mmu_notifier_retry(vcpu, mmu_seq)) {
+	    mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
 		/* inval in progress, write a non-present HPTE */
 		pteh |= HPTE_V_ABSENT;
 		pteh &= ~HPTE_V_VALID;
@@ -2886,7 +2886,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
@@ -3355,7 +3355,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
@@ -565,7 +565,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
@@ -841,9 +841,9 @@ extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
 {
-	if (unlikely(vcpu->kvm->mmu_notifier_count))
+	if (unlikely(kvm->mmu_notifier_count))
 		return 1;
 	/*
 	 * Ensure the read of mmu_notifier_count happens before the read
@@ -856,7 +856,7 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
 	 * can't rely on kvm->mmu_lock to keep things ordered.
 	 */
 	smp_rmb();
-	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
+	if (kvm->mmu_notifier_seq != mmu_seq)
 		return 1;
 	return 0;
 }
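
The smp_rmb() above pairs with the updates performed by KVM's MMU-notifier
callbacks. In outline (a simplified sketch of the logic in
kvm_mmu_notifier_invalidate_range_start/end, not the verbatim code):

    /* invalidate_range_start: make mmu_notifier_retry() return 1. */
    spin_lock(&kvm->mmu_lock);
    kvm->mmu_notifier_count++;
    /* ... unmap the affected gfn range ... */
    spin_unlock(&kvm->mmu_lock);

    /* invalidate_range_end: bump the sequence so racing faults retry. */
    spin_lock(&kvm->mmu_lock);
    kvm->mmu_notifier_seq++;
    kvm->mmu_notifier_count--;
    spin_unlock(&kvm->mmu_lock);

A fault handler that snapshots mmu_notifier_seq before the start callback
and reaches mmu_notifier_retry() after the end callback sees a changed
sequence number and retries rather than installing a stale mapping.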