Commit 5f1c248f authored by Scott Wood, committed by Alexander Graf

kvm/ppc: Call trace_hardirqs_on before entry

Currently this is only being done on 64-bit.  Rather than just move it
out of the 64-bit ifdef, move it to kvmppc_lazy_ee_enable() so that it is
consistent with lazy ee state, and so that we don't track more host
code as interrupts-enabled than necessary.

Rename kvmppc_lazy_ee_enable() to kvmppc_fix_ee_before_entry() to reflect
that this function now has a role on 32-bit as well.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 4baa1d87
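
For readability, here is the reworked helper as it reads after this patch, assembled from the first hunk below. The tail of the CONFIG_PPC64 branch is truncated by the diff viewer ("..."), so it is elided here as well; treat this as a sketch rather than the complete source. The ordering matters: trace_hardirqs_on() tells the irq-flags tracing machinery (used by lockdep and the irq tracers) that interrupts are about to be treated as enabled, and calling it from this shared helper keeps that tracked state consistent with the lazy EE state on both 32-bit and 64-bit, instead of only on the 64-bit path.

static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	/* ... remainder of the PPC64 branch elided by the diff viewer ... */
#endif
}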
@@ -394,10 +394,15 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
 	}
 }
 
-/* Please call after prepare_to_enter. This function puts the lazy ee state
-   back to normal mode, without actually enabling interrupts. */
-static inline void kvmppc_lazy_ee_enable(void)
+/*
+ * Please call after prepare_to_enter. This function puts the lazy ee and irq
+ * disabled tracking state back to normal mode, without actually enabling
+ * interrupts.
+ */
+static inline void kvmppc_fix_ee_before_entry(void)
 {
+	trace_hardirqs_on();
+
 #ifdef CONFIG_PPC64
 	/* Only need to enable IRQs by hard enabling them after this */
 	local_paca->irq_happened = 0;
@@ -890,7 +890,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			local_irq_enable();
 			r = s;
 		} else {
-			kvmppc_lazy_ee_enable();
+			kvmppc_fix_ee_before_entry();
 		}
 	}
 
@@ -1161,7 +1161,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
-	kvmppc_lazy_ee_enable();
+	kvmppc_fix_ee_before_entry();
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
@@ -698,7 +698,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
-	kvmppc_lazy_ee_enable();
+	kvmppc_fix_ee_before_entry();
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
@@ -1168,7 +1168,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			local_irq_enable();
 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
 		} else {
-			kvmppc_lazy_ee_enable();
+			kvmppc_fix_ee_before_entry();
 		}
 	}
 
@@ -117,8 +117,6 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			kvm_guest_exit();
 			continue;
 		}
-
-		trace_hardirqs_on();
 #endif
 
 		kvm_guest_enter();
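
A usage note on the call sites above: all four converge on the same pattern, condensed here into a hypothetical sketch. The variable names come from the hunks, but this is not literal kernel code; in the real source the exit-handling and run paths live in separate functions.

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		/* Entering the guest failed; return to the host and
		   enable interrupts the normal way. */
		local_irq_enable();
		r = s;
	} else {
		/* About to enter the guest: mark irqs as traced-on and
		   reset the lazy EE state, then hard-enable on the way in. */
		kvmppc_fix_ee_before_entry();
		ret = __kvmppc_vcpu_run(kvm_run, vcpu);
	}

Keeping the trace_hardirqs_on() call inside the helper, rather than at each call site, is what lets the 32-bit paths pick it up without duplicating the call.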