Commit 25fedfca authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S HV: Move vcore preemption point up into kvmppc_run_vcpu

Rather than calling cond_resched() in kvmppc_run_core() before doing
the post-processing for the vcpus that we have just run (that is,
calling kvmppc_handle_exit_hv(), kvmppc_set_timer(), etc.), we now do
that post-processing before calling cond_resched(), and that post-
processing is moved out into its own function, post_guest_process().

The reschedule point is now in kvmppc_run_vcpu() and we define a new
vcore state, VCORE_PREEMPT, to indicate that the vcore's runner
task is runnable but not running.  (Doing the reschedule with the
vcore in VCORE_INACTIVE state would be bad because there are potentially
other vcpus waiting for the runner in kvmppc_wait_for_exec() which
then wouldn't get woken up.)

Also, we make use of the handy cond_resched_lock() function, which
unlocks and relocks vc->lock for us around the reschedule.
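For context, here is a minimal sketch of the unlock/reschedule/relock pattern
that cond_resched_lock(&vc->lock) provides.  The function and comments below
are illustrative only (the real helper is __cond_resched_lock() in
kernel/sched/core.c and differs in detail); it is shown just to make clear
what invariant the caller keeps across the reschedule point.

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	/* Illustrative sketch, not the kernel's actual helper. */
	static int cond_resched_lock_sketch(spinlock_t *lock)
	{
		int rescheduled = 0;

		if (should_resched() || spin_needbreak(lock)) {
			spin_unlock(lock);	/* drop vc->lock ... */
			if (should_resched())
				cond_resched();	/* ... let something else run ... */
			else
				cpu_relax();	/* ... or just ease lock contention ... */
			spin_lock(lock);	/* ... and retake the lock before returning */
			rescheduled = 1;
		}
		return rescheduled;
	}

The point for this patch is that the caller's "vc->lock held" invariant is
preserved across the reschedule, while the VCORE_PREEMPT state tells other
vcpu tasks that a runner still exists and will come back.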
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 1f09c3ed
@@ -304,8 +304,9 @@ struct kvmppc_vcore {
 /* Values for vcore_state */
 #define VCORE_INACTIVE	0
 #define VCORE_SLEEPING	1
-#define VCORE_RUNNING	2
-#define VCORE_EXITING	3
+#define VCORE_PREEMPT	2
+#define VCORE_RUNNING	3
+#define VCORE_EXITING	4
 
 /*
  * Struct used to manage memory for a virtual processor area
...
@@ -1882,15 +1882,50 @@ static void prepare_threads(struct kvmppc_vcore *vc)
 	}
 }
 
+static void post_guest_process(struct kvmppc_vcore *vc)
+{
+	u64 now;
+	long ret;
+	struct kvm_vcpu *vcpu, *vnext;
+
+	now = get_tb();
+	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+				 arch.run_list) {
+		/* cancel pending dec exception if dec is positive */
+		if (now < vcpu->arch.dec_expires &&
+		    kvmppc_core_pending_dec(vcpu))
+			kvmppc_core_dequeue_dec(vcpu);
+
+		trace_kvm_guest_exit(vcpu);
+
+		ret = RESUME_GUEST;
+		if (vcpu->arch.trap)
+			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+						    vcpu->arch.run_task);
+
+		vcpu->arch.ret = ret;
+		vcpu->arch.trap = 0;
+
+		if (vcpu->arch.ceded) {
+			if (!is_kvmppc_resume_guest(ret))
+				kvmppc_end_cede(vcpu);
+			else
+				kvmppc_set_timer(vcpu);
+		}
+		if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
+			kvmppc_remove_runnable(vc, vcpu);
+			wake_up(&vcpu->arch.cpu_run);
+		}
+	}
+}
+
 /*
  * Run a set of guest threads on a physical core.
  * Called with vc->lock held.
  */
 static void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu, *vnext;
-	long ret;
-	u64 now;
+	struct kvm_vcpu *vcpu;
 	int i;
 	int srcu_idx;
@@ -1922,8 +1957,11 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	if ((threads_per_core > 1) &&
 	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 			vcpu->arch.ret = -EBUSY;
+			kvmppc_remove_runnable(vc, vcpu);
+			wake_up(&vcpu->arch.cpu_run);
+		}
 		goto out;
 	}
@@ -1979,44 +2017,12 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	kvm_guest_exit();
 
 	preempt_enable();
-	cond_resched();
 
 	spin_lock(&vc->lock);
-	now = get_tb();
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
-		/* cancel pending dec exception if dec is positive */
-		if (now < vcpu->arch.dec_expires &&
-		    kvmppc_core_pending_dec(vcpu))
-			kvmppc_core_dequeue_dec(vcpu);
-
-		trace_kvm_guest_exit(vcpu);
-
-		ret = RESUME_GUEST;
-		if (vcpu->arch.trap)
-			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
-						    vcpu->arch.run_task);
-
-		vcpu->arch.ret = ret;
-		vcpu->arch.trap = 0;
-
-		if (vcpu->arch.ceded) {
-			if (!is_kvmppc_resume_guest(ret))
-				kvmppc_end_cede(vcpu);
-			else
-				kvmppc_set_timer(vcpu);
-		}
-	}
+	post_guest_process(vc);
 
  out:
 	vc->vcore_state = VCORE_INACTIVE;
-	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
-				 arch.run_list) {
-		if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
-			kvmppc_remove_runnable(vc, vcpu);
-			wake_up(&vcpu->arch.cpu_run);
-		}
-	}
 
 	trace_kvmppc_run_core(vc, 1);
 }
@@ -2138,7 +2144,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		}
 		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 			break;
-		vc->runner = vcpu;
 		n_ceded = 0;
 		list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
 			if (!v->arch.pending_exceptions)
@@ -2146,10 +2151,17 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			else
 				v->arch.ceded = 0;
 		}
-		if (n_ceded == vc->n_runnable)
+		vc->runner = vcpu;
+		if (n_ceded == vc->n_runnable) {
 			kvmppc_vcore_blocked(vc);
-		else
+		} else if (should_resched()) {
+			vc->vcore_state = VCORE_PREEMPT;
+			/* Let something else run */
+			cond_resched_lock(&vc->lock);
+			vc->vcore_state = VCORE_INACTIVE;
+		} else {
 			kvmppc_run_core(vc);
+		}
 
 		vc->runner = NULL;
 	}
...