Commit 53655ddd authored by Paul Mackerras, committed by Michael Ellerman

KVM: PPC: Book3S HV: Call kvmppc_handle_exit_hv() with vcore unlocked

Currently kvmppc_handle_exit_hv() is called with the vcore lock held
because it is called within a for_each_runnable_thread loop.
However, we already unlock the vcore within kvmppc_handle_exit_hv()
under certain circumstances, and this is safe because (a) any vcpus
that become runnable and are added to the runnable set by
kvmppc_run_vcpu() have their vcpu->arch.trap == 0 and can't actually
run in the guest (because the vcore state is VCORE_EXITING), and
(b) for_each_runnable_thread is safe against addition or removal
of vcpus from the runnable set.
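
For context, claim (b) holds because for_each_runnable_thread() carries only an
array index between iterations and re-reads each slot with READ_ONCE(). The
sketch below paraphrases its definition from arch/powerpc/kvm/book3s_hv.c of
this period, simplified for illustration; it is not part of this patch:

/* Paraphrased from arch/powerpc/kvm/book3s_hv.c; simplified. */
static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
						    int *ip)
{
	int i = *ip;
	struct kvm_vcpu *vcpu;

	while (++i < MAX_SMT_THREADS) {
		/*
		 * Re-read the slot on every step: a vcpu may be removed
		 * (its slot set to NULL) while the vcore lock is dropped.
		 */
		vcpu = READ_ONCE(vc->runnable_threads[i]);
		if (vcpu) {
			*ip = i;
			return vcpu;
		}
	}
	return NULL;
}

/*
 * Only the index i survives between iterations, so the iterator never
 * dereferences a stale vcpu pointer after the vcore lock is dropped.
 */
#define for_each_runnable_thread(i, vcpu, vc) \
	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )

Removal of a vcpu just NULLs its slot, which the next re-read observes;
addition publishes a vcpu that still has trap == 0, which point (a) covers.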

Therefore, in order to simplify things for following patches, let's
drop the vcore lock in the for_each_runnable_thread loop, so
kvmppc_handle_exit_hv() gets called without the vcore lock held.
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 7854f754
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1084,7 +1084,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
-/* Called with vcpu->arch.vcore->lock held */
 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				 struct task_struct *tsk)
 {
@@ -1205,10 +1204,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				swab32(vcpu->arch.emul_inst) :
 				vcpu->arch.emul_inst;
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
-			/* Need vcore unlocked to call kvmppc_get_last_inst */
-			spin_unlock(&vcpu->arch.vcore->lock);
 			r = kvmppc_emulate_debug_inst(run, vcpu);
-			spin_lock(&vcpu->arch.vcore->lock);
 		} else {
 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
 			r = RESUME_GUEST;
@@ -1224,12 +1220,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
 		r = EMULATE_FAIL;
 		if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
-		    cpu_has_feature(CPU_FTR_ARCH_300)) {
-			/* Need vcore unlocked to call kvmppc_get_last_inst */
-			spin_unlock(&vcpu->arch.vcore->lock);
+		    cpu_has_feature(CPU_FTR_ARCH_300))
 			r = kvmppc_emulate_doorbell_instr(vcpu);
-			spin_lock(&vcpu->arch.vcore->lock);
-		}
 		if (r == EMULATE_FAIL) {
 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
 			r = RESUME_GUEST;
@@ -2599,6 +2591,14 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 	spin_lock(&vc->lock);
 	now = get_tb();
 	for_each_runnable_thread(i, vcpu, vc) {
+		/*
+		 * It's safe to unlock the vcore in the loop here, because
+		 * for_each_runnable_thread() is safe against removal of
+		 * the vcpu, and the vcore state is VCORE_EXITING here,
+		 * so any vcpus becoming runnable will have their arch.trap
+		 * set to zero and can't actually run in the guest.
+		 */
+		spin_unlock(&vc->lock);
 		/* cancel pending dec exception if dec is positive */
 		if (now < vcpu->arch.dec_expires &&
 		    kvmppc_core_pending_dec(vcpu))
@@ -2614,6 +2614,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 		vcpu->arch.ret = ret;
 		vcpu->arch.trap = 0;
 
+		spin_lock(&vc->lock);
 		if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
 			if (vcpu->arch.pending_exceptions)
 				kvmppc_core_prepare_to_enter(vcpu);
...
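
Taken together, the two post_guest_process() hunks leave the loop shaped
roughly as below. The diff elides the middle of the loop, so the
kvmppc_handle_exit_hv() call site (the vcpu->arch.kvm_run and
vcpu->arch.run_task arguments) is reconstructed from the surrounding code of
this era and should be read as illustrative rather than verbatim:

	spin_lock(&vc->lock);
	now = get_tb();
	for_each_runnable_thread(i, vcpu, vc) {
		/*
		 * Safe per the comment added above: the vcore is in
		 * VCORE_EXITING state, so nothing new can enter the guest.
		 */
		spin_unlock(&vc->lock);

		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			/* Now called without the vcore lock held. */
			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
						    vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		/* Retake the lock before updating the runnable set. */
		spin_lock(&vc->lock);
		if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
			...
		}
	}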