Commit e4335f53 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV: Implement scheduling wait interval counters in the VPA

PAPR specifies accumulated virtual processor wait intervals that relate
to partition scheduling interval times. Implement these counters in the
same way as they are reported by the DTL.
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220908132545.4085849-5-npiggin@gmail.com
parent 9511b5a0
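For readers skimming the diff below, here is a minimal, self-contained userspace sketch (not part of the patch; struct and function names such as fake_vpa, vpa_accumulate_wait and vpa_set_wait_total are invented for illustration) of the two update styles the patch applies to the VPA enqueue-to-dispatch wait counter: the older dispatch path accumulates a per-dispatch stolen-time delta into the big-endian field, while the P9 path overwrites it with the vcore's absolute stolen-time total.

/* Illustrative model only -- not kernel code or kernel API. */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

struct fake_vpa {
        uint64_t enqueue_dispatch_tb;   /* big-endian accumulator, as in the PAPR VPA */
};

/* Pre-P9 style: add the stolen delta measured for this dispatch. */
static void vpa_accumulate_wait(struct fake_vpa *vpa, uint64_t stolen_delta)
{
        vpa->enqueue_dispatch_tb =
                htobe64(be64toh(vpa->enqueue_dispatch_tb) + stolen_delta);
}

/* P9 style: the vcore already tracks an absolute stolen total, so store it directly. */
static void vpa_set_wait_total(struct fake_vpa *vpa, uint64_t stolen_total)
{
        vpa->enqueue_dispatch_tb = htobe64(stolen_total);
}

int main(void)
{
        struct fake_vpa vpa = { 0 };

        vpa_accumulate_wait(&vpa, 100);   /* 100 timebase ticks stolen */
        vpa_accumulate_wait(&vpa, 250);   /* 250 more on the next dispatch */
        printf("accumulated wait: %llu ticks\n",
               (unsigned long long)be64toh(vpa.enqueue_dispatch_tb));   /* 350 */

        vpa_set_wait_total(&vpa, 1000);   /* P9-style absolute update */
        printf("wait total now: %llu ticks\n",
               (unsigned long long)be64toh(vpa.enqueue_dispatch_tb));   /* 1000 */
        return 0;
}

In both paths of the actual patch, the same stolen value (or delta) is also passed to __kvmppc_create_dtl_entry(), which is what ties the new VPA counter to the values already reported through the DTL.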
@@ -733,16 +733,15 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 }
 
 static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+                                      struct lppaca *vpa,
                                       unsigned int pcpu, u64 now,
                                       unsigned long stolen)
 {
         struct dtl_entry *dt;
-        struct lppaca *vpa;
 
         dt = vcpu->arch.dtl_ptr;
-        vpa = vcpu->arch.vpa.pinned_addr;
 
-        if (!dt || !vpa)
+        if (!dt)
                 return;
 
         dt->dispatch_reason = 7;
@@ -763,29 +762,23 @@ static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
         /* order writing *dt vs. writing vpa->dtl_idx */
         smp_wmb();
         vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
-        vcpu->arch.dtl.dirty = true;
-}
-
-static void kvmppc_create_dtl_entry_p9(struct kvm_vcpu *vcpu,
-                                       struct kvmppc_vcore *vc,
-                                       u64 now)
-{
-        unsigned long stolen;
-
-        stolen = vc->stolen_tb - vcpu->arch.stolen_logged;
-        vcpu->arch.stolen_logged = vc->stolen_tb;
-
-        __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now, stolen);
+
+        /* vcpu->arch.dtl.dirty is set by the caller */
 }
 
-static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
-                                    struct kvmppc_vcore *vc)
+static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu,
+                                       struct kvmppc_vcore *vc)
 {
+        struct lppaca *vpa;
         unsigned long stolen;
         unsigned long core_stolen;
         u64 now;
         unsigned long flags;
 
+        vpa = vcpu->arch.vpa.pinned_addr;
+        if (!vpa)
+                return;
+
         now = mftb();
 
         core_stolen = vcore_stolen_time(vc, now);
@@ -796,7 +789,34 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
         vcpu->arch.busy_stolen = 0;
         spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 
-        __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now + vc->tb_offset, stolen);
+        vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen);
+
+        __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen);
+
+        vcpu->arch.vpa.dirty = true;
 }
 
+static void kvmppc_update_vpa_dispatch_p9(struct kvm_vcpu *vcpu,
+                                          struct kvmppc_vcore *vc,
+                                          u64 now)
+{
+        struct lppaca *vpa;
+        unsigned long stolen;
+        unsigned long stolen_delta;
+
+        vpa = vcpu->arch.vpa.pinned_addr;
+        if (!vpa)
+                return;
+
+        stolen = vc->stolen_tb;
+        stolen_delta = stolen - vcpu->arch.stolen_logged;
+        vcpu->arch.stolen_logged = stolen;
+
+        vpa->enqueue_dispatch_tb = cpu_to_be64(stolen);
+
+        __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta);
+
+        vcpu->arch.vpa.dirty = true;
+}
+
 /* See if there is a doorbell interrupt pending for a vcpu */
@@ -3852,7 +3872,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                  * kvmppc_core_prepare_to_enter.
                  */
                 kvmppc_start_thread(vcpu, pvc);
-                kvmppc_create_dtl_entry(vcpu, pvc);
+                kvmppc_update_vpa_dispatch(vcpu, pvc);
                 trace_kvm_guest_enter(vcpu);
                 if (!vcpu->arch.ptid)
                         thr0_done = true;
@@ -4449,7 +4469,7 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
                 if ((vc->vcore_state == VCORE_PIGGYBACK ||
                      vc->vcore_state == VCORE_RUNNING) &&
                     !VCORE_IS_EXITING(vc)) {
-                        kvmppc_create_dtl_entry(vcpu, vc);
+                        kvmppc_update_vpa_dispatch(vcpu, vc);
                         kvmppc_start_thread(vcpu, vc);
                         trace_kvm_guest_enter(vcpu);
                 } else if (vc->vcore_state == VCORE_SLEEPING) {
@@ -4637,7 +4657,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
         tb = mftb();
 
-        kvmppc_create_dtl_entry_p9(vcpu, vc, tb + vc->tb_offset);
+        kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset);
 
         trace_kvm_guest_enter(vcpu);