Commit cb2553a0 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV P9: Optimise timebase reads

Reduce the number of mftb instructions executed by passing the current
timebase value around the entry and exit code rather than reading it
multiple times.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-25-npiggin@gmail.com
parent 6547af3e
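For context, a minimal standalone sketch of the pattern this change applies, using hypothetical names (read_timebase, enter_guest_old, enter_guest_new) rather than the kernel's actual helpers: the caller samples the timebase once, passes a pointer down through the entry path, and the callee updates it on exit instead of issuing its own reads.

/* Illustration only; hypothetical names, not the KVM code. */
#include <stdio.h>
#include <stdint.h>

static uint64_t fake_tb;                        /* stand-in for the timebase register */

static uint64_t read_timebase(void)             /* stand-in for mftb() */
{
        return fake_tb += 7;                    /* pretend time advances on each read */
}

/* Before: the callee re-reads the timebase itself. */
static int enter_guest_old(uint64_t time_limit)
{
        uint64_t tb = read_timebase();          /* extra read on every entry */

        if ((int64_t)(time_limit - tb) < 0)
                return -1;
        /* ... further read_timebase() calls would follow in exit handling ... */
        return 0;
}

/* After: the caller's sample is threaded through by pointer and refreshed once. */
static int enter_guest_new(uint64_t time_limit, uint64_t *tb)
{
        if ((int64_t)(time_limit - *tb) < 0)
                return -1;
        *tb = read_timebase();                  /* caller sees the post-exit time */
        return 0;
}

int main(void)
{
        uint64_t tb = read_timebase();

        enter_guest_old(tb + 100);
        enter_guest_new(tb + 100, &tb);
        printf("timebase after exit: %llu\n", (unsigned long long)tb);
        return 0;
}

The point is simply that every stage works from the same sampled value, and only the exit path refreshes it.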
@@ -154,7 +154,7 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
 	return radix;
 }
 
-int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);
 
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 #endif
@@ -183,13 +183,13 @@ static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	s64 hdec, dec;
-	u64 tb, purr, spurr;
+	u64 purr, spurr;
 	u64 *exsave;
 	bool ri_set;
 	int trap;
@@ -203,8 +203,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	unsigned long host_dawr1;
 	unsigned long host_dawrx1;
 
-	tb = mftb();
-	hdec = time_limit - tb;
+	hdec = time_limit - *tb;
 	if (hdec < 0)
 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
@@ -230,11 +229,13 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	}
 
 	if (vc->tb_offset) {
-		u64 new_tb = tb + vc->tb_offset;
+		u64 new_tb = *tb + vc->tb_offset;
 		mtspr(SPRN_TBU40, new_tb);
-		tb = mftb();
-		if ((tb & 0xffffff) < (new_tb & 0xffffff))
-			mtspr(SPRN_TBU40, new_tb + 0x1000000);
+		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+			new_tb += 0x1000000;
+			mtspr(SPRN_TBU40, new_tb);
+		}
+		*tb = new_tb;
 		vc->tb_offset_applied = vc->tb_offset;
 	}
@@ -317,7 +318,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	 */
 	mtspr(SPRN_HDEC, hdec);
-	mtspr(SPRN_DEC, vcpu->arch.dec_expires - tb);
+	mtspr(SPRN_DEC, vcpu->arch.dec_expires - *tb);
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 tm_return_to_guest:
@@ -466,15 +467,17 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	dec = mfspr(SPRN_DEC);
 	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
 		dec = (s32) dec;
-	tb = mftb();
-	vcpu->arch.dec_expires = dec + tb;
+	*tb = mftb();
+	vcpu->arch.dec_expires = dec + *tb;
 
 	if (vc->tb_offset_applied) {
-		u64 new_tb = tb - vc->tb_offset_applied;
+		u64 new_tb = *tb - vc->tb_offset_applied;
 		mtspr(SPRN_TBU40, new_tb);
-		tb = mftb();
-		if ((tb & 0xffffff) < (new_tb & 0xffffff))
-			mtspr(SPRN_TBU40, new_tb + 0x1000000);
+		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+			new_tb += 0x1000000;
+			mtspr(SPRN_TBU40, new_tb);
+		}
+		*tb = new_tb;
 		vc->tb_offset_applied = 0;
 	}
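One more note on the exit path in the last hunk: the remaining guest decrementer is folded into an absolute expiry (dec_expires = dec + *tb) using the timebase sample just taken, and on the next entry it is converted back with dec_expires - *tb. A tiny sketch of that relative/absolute conversion, with hypothetical helper names, not kernel code:

/* Illustration of the DEC bookkeeping above; hypothetical helpers, not kernel code. */
#include <stdint.h>

/* On exit: the remaining (possibly negative) DEC plus the current timebase
 * gives an absolute expiry that later code can reuse without another read. */
static inline uint64_t dec_to_expiry(int64_t dec_remaining, uint64_t now_tb)
{
        return now_tb + (uint64_t)dec_remaining;
}

/* On the next entry: the value to program back into DEC is the distance
 * from the current timebase to that stored expiry. */
static inline int64_t expiry_to_dec(uint64_t dec_expires, uint64_t now_tb)
{
        return (int64_t)(dec_expires - now_tb);
}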