Commit cb2553a0 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV P9: Optimise timebase reads

Reduce the number of mftb instructions executed by passing the current
timebase value around the entry and exit code rather than reading it
multiple times.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-25-npiggin@gmail.com
parent 6547af3e
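To illustrate the pattern the patch applies, here is a minimal, self-contained C sketch (plain userspace C, not kernel code): the caller samples the clock once and threads that value through the stolen-time helpers instead of each helper issuing its own read. read_timebase() and struct vcore below are hypothetical stand-ins for mftb() and the stolen-time fields of the kernel's struct kvmppc_vcore.

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for mftb(): a single monotonic clock read. */
static uint64_t read_timebase(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Simplified stand-in for the stolen-time fields of struct kvmppc_vcore. */
struct vcore {
	uint64_t preempt_tb;
	uint64_t stolen_tb;
};

/*
 * After the change the helpers take the timebase as an argument; before,
 * each of them called mftb() (here: read_timebase()) internally.
 */
static void start_stolen(struct vcore *vc, uint64_t tb)
{
	vc->preempt_tb = tb;
}

static void end_stolen(struct vcore *vc, uint64_t tb)
{
	vc->stolen_tb += tb - vc->preempt_tb;
}

int main(void)
{
	struct vcore vc = { 0, 0 };
	uint64_t now = read_timebase();		/* one read ...              */

	start_stolen(&vc, now);			/* ... reused by this caller */
	end_stolen(&vc, read_timebase());	/* read again only when a fresh value is needed */
	printf("stolen: %llu ns\n", (unsigned long long)vc.stolen_tb);
	return 0;
}

In the patch itself, call sites that are not on the hot path simply pass mftb() directly (for example kvmppc_core_start_stolen(vc, mftb())), so behaviour there is unchanged; the saving comes from the P9 entry/exit path, where a single read in kvmhv_run_single_vcpu() is threaded through kvmppc_create_dtl_entry(), kvmhv_p9_guest_entry() and kvmhv_vcpu_entry_p9().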
@@ -154,7 +154,7 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
 	return radix;
 }
 
-int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);
 
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 
 #endif
...
@@ -276,22 +276,22 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
  * they should never fail.)
  */
-static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
+static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&vc->stoltb_lock, flags);
-	vc->preempt_tb = mftb();
+	vc->preempt_tb = tb;
 	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
 }
 
-static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
+static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&vc->stoltb_lock, flags);
 	if (vc->preempt_tb != TB_NIL) {
-		vc->stolen_tb += mftb() - vc->preempt_tb;
+		vc->stolen_tb += tb - vc->preempt_tb;
 		vc->preempt_tb = TB_NIL;
 	}
 	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
@@ -301,6 +301,7 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	unsigned long flags;
+	u64 now = mftb();
 
 	/*
 	 * We can test vc->runner without taking the vcore lock,
@@ -309,12 +310,12 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 	 * ever sets it to NULL.
 	 */
 	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
-		kvmppc_core_end_stolen(vc);
+		kvmppc_core_end_stolen(vc, now);
 
 	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
 	    vcpu->arch.busy_preempt != TB_NIL) {
-		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
+		vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt;
 		vcpu->arch.busy_preempt = TB_NIL;
 	}
 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
@@ -324,13 +325,14 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	unsigned long flags;
+	u64 now = mftb();
 
 	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
-		kvmppc_core_start_stolen(vc);
+		kvmppc_core_start_stolen(vc, now);
 
 	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
-		vcpu->arch.busy_preempt = mftb();
+		vcpu->arch.busy_preempt = now;
 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
@@ -685,7 +687,7 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 }
 
 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
-				    struct kvmppc_vcore *vc)
+				    struct kvmppc_vcore *vc, u64 tb)
 {
 	struct dtl_entry *dt;
 	struct lppaca *vpa;
@@ -696,7 +698,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 
 	dt = vcpu->arch.dtl_ptr;
 	vpa = vcpu->arch.vpa.pinned_addr;
-	now = mftb();
+	now = tb;
 	core_stolen = vcore_stolen_time(vc, now);
 	stolen = core_stolen - vcpu->arch.stolen_logged;
 	vcpu->arch.stolen_logged = core_stolen;
@@ -2914,14 +2916,14 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 extern int __kvmppc_vcore_entry(void);
 
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
-				   struct kvm_vcpu *vcpu)
+				   struct kvm_vcpu *vcpu, u64 tb)
 {
 	u64 now;
 
 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 		return;
 	spin_lock_irq(&vcpu->arch.tbacct_lock);
-	now = mftb();
+	now = tb;
 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 		vcpu->arch.stolen_logged;
 	vcpu->arch.busy_preempt = now;
@@ -3172,14 +3174,14 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
 	}
 
 	/* Start accumulating stolen time */
-	kvmppc_core_start_stolen(vc);
+	kvmppc_core_start_stolen(vc, mftb());
 }
 
 static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
 {
 	struct preempted_vcore_list *lp;
 
-	kvmppc_core_end_stolen(vc);
+	kvmppc_core_end_stolen(vc, mftb());
 	if (!list_empty(&vc->preempt_list)) {
 		lp = &per_cpu(preempted_vcores, vc->pcpu);
 		spin_lock(&lp->lock);
@@ -3306,7 +3308,7 @@ static void prepare_threads(struct kvmppc_vcore *vc)
 			vcpu->arch.ret = RESUME_GUEST;
 		else
 			continue;
-		kvmppc_remove_runnable(vc, vcpu);
+		kvmppc_remove_runnable(vc, vcpu, mftb());
 		wake_up(&vcpu->arch.cpu_run);
 	}
 }
@@ -3325,7 +3327,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 			list_del_init(&pvc->preempt_list);
 			if (pvc->runner == NULL) {
 				pvc->vcore_state = VCORE_INACTIVE;
-				kvmppc_core_end_stolen(pvc);
+				kvmppc_core_end_stolen(pvc, mftb());
 			}
 			spin_unlock(&pvc->lock);
 			continue;
@@ -3334,7 +3336,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 			spin_unlock(&pvc->lock);
 			continue;
 		}
-		kvmppc_core_end_stolen(pvc);
+		kvmppc_core_end_stolen(pvc, mftb());
 		pvc->vcore_state = VCORE_PIGGYBACK;
 		if (cip->total_threads >= target_threads)
 			break;
@@ -3401,7 +3403,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 			else
 				++still_running;
 		} else {
-			kvmppc_remove_runnable(vc, vcpu);
+			kvmppc_remove_runnable(vc, vcpu, mftb());
 			wake_up(&vcpu->arch.cpu_run);
 		}
 	}
@@ -3410,7 +3412,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 			kvmppc_vcore_preempt(vc);
 		} else if (vc->runner) {
 			vc->vcore_state = VCORE_PREEMPT;
-			kvmppc_core_start_stolen(vc);
+			kvmppc_core_start_stolen(vc, mftb());
 		} else {
 			vc->vcore_state = VCORE_INACTIVE;
 		}
@@ -3541,7 +3543,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
 		for_each_runnable_thread(i, vcpu, vc) {
 			vcpu->arch.ret = -EBUSY;
-			kvmppc_remove_runnable(vc, vcpu);
+			kvmppc_remove_runnable(vc, vcpu, mftb());
 			wake_up(&vcpu->arch.cpu_run);
 		}
 		goto out;
@@ -3673,7 +3675,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		pvc->pcpu = pcpu + thr;
 		for_each_runnable_thread(i, vcpu, pvc) {
 			kvmppc_start_thread(vcpu, pvc);
-			kvmppc_create_dtl_entry(vcpu, pvc);
+			kvmppc_create_dtl_entry(vcpu, pvc, mftb());
 			trace_kvm_guest_enter(vcpu);
 			if (!vcpu->arch.ptid)
 				thr0_done = true;
@@ -4152,20 +4154,17 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
  * Guest entry for POWER9 and later CPUs.
  */
 static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
-			 unsigned long lpcr)
+			 unsigned long lpcr, u64 *tb)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	struct p9_host_os_sprs host_os_sprs;
 	s64 dec;
-	u64 tb, next_timer;
+	u64 next_timer;
 	unsigned long msr;
 	int trap;
 
-	WARN_ON_ONCE(vcpu->arch.ceded);
-
-	tb = mftb();
 	next_timer = timer_get_next_tb();
-	if (tb >= next_timer)
+	if (*tb >= next_timer)
 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
 	if (next_timer < time_limit)
 		time_limit = next_timer;
@@ -4262,7 +4261,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	 *
 	 * XXX: Another day's problem.
 	 */
-	mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - tb);
+	mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb);
 
 	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
 	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
@@ -4278,8 +4277,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 		dec = mfspr(SPRN_DEC);
 		if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
 			dec = (s32) dec;
-		tb = mftb();
-		vcpu->arch.dec_expires = dec + (tb + vc->tb_offset);
+		*tb = mftb();
+		vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset);
 
 		/* H_CEDE has to be handled now, not later */
 		if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
@@ -4291,7 +4290,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	} else {
 		kvmppc_xive_push_vcpu(vcpu);
 
-		trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr);
+		trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
 		if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
 		    !(vcpu->arch.shregs.msr & MSR_PR)) {
 			unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -4322,6 +4321,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	store_spr_state(vcpu);
 
+	timer_rearm_host_dec(*tb);
+
 	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
 	store_fp_state(&vcpu->arch.fp);
@@ -4341,8 +4342,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	vc->entry_exit_map = 0x101;
 	vc->in_guest = 0;
 
-	timer_rearm_host_dec(tb);
-
 	kvmppc_subcore_exit_guest();
 
 	return trap;
@@ -4596,7 +4595,7 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
 		if ((vc->vcore_state == VCORE_PIGGYBACK ||
 		     vc->vcore_state == VCORE_RUNNING) &&
 		    !VCORE_IS_EXITING(vc)) {
-			kvmppc_create_dtl_entry(vcpu, vc);
+			kvmppc_create_dtl_entry(vcpu, vc, mftb());
 			kvmppc_start_thread(vcpu, vc);
 			trace_kvm_guest_enter(vcpu);
 		} else if (vc->vcore_state == VCORE_SLEEPING) {
@@ -4631,7 +4630,7 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
 		for_each_runnable_thread(i, v, vc) {
 			kvmppc_core_prepare_to_enter(v);
 			if (signal_pending(v->arch.run_task)) {
-				kvmppc_remove_runnable(vc, v);
+				kvmppc_remove_runnable(vc, v, mftb());
 				v->stat.signal_exits++;
 				v->run->exit_reason = KVM_EXIT_INTR;
 				v->arch.ret = -EINTR;
@@ -4672,7 +4671,7 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
 		kvmppc_vcore_end_preempt(vc);
 
 	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
-		kvmppc_remove_runnable(vc, vcpu);
+		kvmppc_remove_runnable(vc, vcpu, mftb());
 		vcpu->stat.signal_exits++;
 		run->exit_reason = KVM_EXIT_INTR;
 		vcpu->arch.ret = -EINTR;
@@ -4700,6 +4699,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
 	unsigned long flags;
+	u64 tb;
 
 	trace_kvmppc_run_vcpu_enter(vcpu);
@@ -4710,7 +4710,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	vc = vcpu->arch.vcore;
 	vcpu->arch.ceded = 0;
 	vcpu->arch.run_task = current;
-	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
 	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
 	vcpu->arch.busy_preempt = TB_NIL;
 	vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
@@ -4735,7 +4734,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	kvmppc_update_vpas(vcpu);
 
 	init_vcore_to_run(vc);
-	vc->preempt_tb = TB_NIL;
 
 	preempt_disable();
 	pcpu = smp_processor_id();
@@ -4745,6 +4743,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	/* flags save not required, but irq_pmu has no disable/enable API */
 	powerpc_local_irq_pmu_save(flags);
 
 	if (signal_pending(current))
 		goto sigpend;
 	if (need_resched() || !kvm->arch.mmu_ready)
@@ -4767,12 +4766,17 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 		goto out;
 	}
 
+	tb = mftb();
+	vcpu->arch.stolen_logged = vcore_stolen_time(vc, tb);
+	vc->preempt_tb = TB_NIL;
+
 	kvmppc_clear_host_core(pcpu);
 
 	local_paca->kvm_hstate.napping = 0;
 	local_paca->kvm_hstate.kvm_split_mode = NULL;
 	kvmppc_start_thread(vcpu, vc);
-	kvmppc_create_dtl_entry(vcpu, vc);
+	kvmppc_create_dtl_entry(vcpu, vc, tb);
 	trace_kvm_guest_enter(vcpu);
 
 	vc->vcore_state = VCORE_RUNNING;
@@ -4787,7 +4791,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	/* Tell lockdep that we're about to enable interrupts */
 	trace_hardirqs_on();
 
-	trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
+	trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb);
 	vcpu->arch.trap = trap;
 
 	trace_hardirqs_off();
@@ -4829,7 +4833,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	 * by L2 and the L1 decrementer is provided in hdec_expires
 	 */
 	if (kvmppc_core_pending_dec(vcpu) &&
-			((get_tb() < kvmppc_dec_expires_host_tb(vcpu)) ||
+			((tb < kvmppc_dec_expires_host_tb(vcpu)) ||
 			 (trap == BOOK3S_INTERRUPT_SYSCALL &&
 			  kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
 		kvmppc_core_dequeue_dec(vcpu);
@@ -4865,7 +4869,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	trace_kvmppc_run_core(vc, 1);
 
  done:
-	kvmppc_remove_runnable(vc, vcpu);
+	kvmppc_remove_runnable(vc, vcpu, tb);
 	trace_kvmppc_run_vcpu_exit(vcpu);
 
 	return vcpu->arch.ret;
...
@@ -183,13 +183,13 @@ static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	s64 hdec, dec;
-	u64 tb, purr, spurr;
+	u64 purr, spurr;
 	u64 *exsave;
 	bool ri_set;
 	int trap;
@@ -203,8 +203,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
 	unsigned long host_dawr1;
 	unsigned long host_dawrx1;
 
-	tb = mftb();
-	hdec = time_limit - tb;
+	hdec = time_limit - *tb;
 	if (hdec < 0)
 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
@@ -230,11 +229,13 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
 	}
 
 	if (vc->tb_offset) {
-		u64 new_tb = tb + vc->tb_offset;
+		u64 new_tb = *tb + vc->tb_offset;
 		mtspr(SPRN_TBU40, new_tb);
-		tb = mftb();
-		if ((tb & 0xffffff) < (new_tb & 0xffffff))
-			mtspr(SPRN_TBU40, new_tb + 0x1000000);
+		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+			new_tb += 0x1000000;
+			mtspr(SPRN_TBU40, new_tb);
+		}
+		*tb = new_tb;
 		vc->tb_offset_applied = vc->tb_offset;
 	}
@@ -317,7 +318,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
 	 */
 	mtspr(SPRN_HDEC, hdec);
-	mtspr(SPRN_DEC, vcpu->arch.dec_expires - tb);
+	mtspr(SPRN_DEC, vcpu->arch.dec_expires - *tb);
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 tm_return_to_guest:
@@ -466,15 +467,17 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
 	dec = mfspr(SPRN_DEC);
 	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
 		dec = (s32) dec;
-	tb = mftb();
-	vcpu->arch.dec_expires = dec + tb;
+	*tb = mftb();
+	vcpu->arch.dec_expires = dec + *tb;
 
 	if (vc->tb_offset_applied) {
-		u64 new_tb = tb - vc->tb_offset_applied;
+		u64 new_tb = *tb - vc->tb_offset_applied;
 		mtspr(SPRN_TBU40, new_tb);
-		tb = mftb();
-		if ((tb & 0xffffff) < (new_tb & 0xffffff))
-			mtspr(SPRN_TBU40, new_tb + 0x1000000);
+		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+			new_tb += 0x1000000;
+			mtspr(SPRN_TBU40, new_tb);
+		}
+		*tb = new_tb;
 		vc->tb_offset_applied = 0;
 	}