Commit 7284ca8a authored by Simon Guo, committed by Paul Mackerras

KVM: PPC: Book3S PR: Support TAR handling for PR KVM HTM

Currently the guest kernel doesn't handle the TAR facility unavailable
interrupt and always runs with the TAR bit on. PR KVM enables TAR lazily.
TAR is not a frequently used register and is not included in the SVCPU
struct.

Due to the above, the checkpointed TAR value might be bogus. To solve
this issue, we make the TAR bit in vcpu->arch.fscr consistent with
shadow_fscr when TM is enabled.
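That consistency is enforced when the guest writes FSCR; a minimal sketch
of the logic, mirroring the kvmppc_set_fscr() hunk further below:

        if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
                /* guest dropped TAR: drop it in the shadow FSCR too */
                kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
                /* guest newly enabled TAR: load the facility eagerly so
                 * the live TAR matches vcpu->arch.tar */
                vcpu->arch.fscr = fscr;
                kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
        }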

At the end of emulating treclaim., the correct TAR value needs to be
loaded into the register if the FSCR_TAR bit is on.
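In the kvmppc_emulate_treclaim() hunk below this amounts to:

        if (vcpu->arch.shadow_fscr & FSCR_TAR)
                mtspr(SPRN_TAR, vcpu->arch.tar);        /* reload the live TAR */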

At the beginning of emulating trechkpt., TAR needs to be flushed so that
the right TAR value can be copied into tar_tm.
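The flush is done by giving the facility back before the checkpointed copy
is taken, as in the kvmppc_emulate_trchkpt() hunk below:

        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);   /* flush live TAR into vcpu->arch.tar */
        kvmppc_copyto_vcpu_tm(vcpu);
        kvmppc_save_tm_sprs(vcpu);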

Tested with:
tools/testing/selftests/powerpc/tm/tm-tar
tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar (with the DSCR/PPR
related testing removed).
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 68ab07b9
@@ -271,6 +271,8 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
 static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
 #endif

+void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
 extern int kvm_irq_bypass;

 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
...
@@ -173,6 +173,9 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
 	guest_msr &= ~(MSR_TS_MASK);
 	kvmppc_set_msr(vcpu, guest_msr);
 	preempt_enable();
+
+	if (vcpu->arch.shadow_fscr & FSCR_TAR)
+		mtspr(SPRN_TAR, vcpu->arch.tar);
 }

 static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
@@ -185,6 +188,7 @@ static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
 	 * copy.
 	 */
 	kvmppc_giveup_ext(vcpu, MSR_VSX);
+	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
 	kvmppc_copyto_vcpu_tm(vcpu);
 	kvmppc_save_tm_sprs(vcpu);
...
@@ -55,7 +55,9 @@

 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
-static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+#ifdef CONFIG_PPC_BOOK3S_64
+static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
+#endif

 /* Some compatibility defines */
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -346,6 +348,7 @@ void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
 		return;
 	}

+	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
 	kvmppc_giveup_ext(vcpu, MSR_VSX);

 	preempt_disable();
@@ -357,8 +360,11 @@ void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
 {
 	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
 		kvmppc_restore_tm_sprs(vcpu);
-		if (kvmppc_get_msr(vcpu) & MSR_TM)
+		if (kvmppc_get_msr(vcpu) & MSR_TM) {
 			kvmppc_handle_lost_math_exts(vcpu);
+			if (vcpu->arch.fscr & FSCR_TAR)
+				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
+		}
 		return;
 	}
@@ -366,9 +372,11 @@ void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
 	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
 	preempt_enable();

-	if (kvmppc_get_msr(vcpu) & MSR_TM)
+	if (kvmppc_get_msr(vcpu) & MSR_TM) {
 		kvmppc_handle_lost_math_exts(vcpu);
+		if (vcpu->arch.fscr & FSCR_TAR)
+			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
+	}
 }

 #endif
@@ -819,7 +827,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 }

 /* Give up facility (TAR / EBB / DSCR) */
-static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
+void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
@@ -1020,7 +1028,12 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
 	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
 		/* TAR got dropped, drop it in shadow too */
 		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+	} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
+		vcpu->arch.fscr = fscr;
+		kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
+		return;
 	}
+
 	vcpu->arch.fscr = fscr;
 }
 #endif
...
@@ -172,15 +172,21 @@ _GLOBAL(_kvmppc_save_tm_pr)
 	mfmsr	r5
 	SAVE_GPR(5, r1)

-	/* also save DSCR/CR so that it can be recovered later */
+	/* also save DSCR/CR/TAR so that it can be recovered later */
 	mfspr	r6, SPRN_DSCR
 	SAVE_GPR(6, r1)

 	mfcr	r7
 	stw	r7, _CCR(r1)

+	mfspr	r8, SPRN_TAR
+	SAVE_GPR(8, r1)
+
 	bl	__kvmppc_save_tm

+	REST_GPR(8, r1)
+	mtspr	SPRN_TAR, r8
+
 	ld	r7, _CCR(r1)
 	mtcr	r7
@@ -340,15 +346,21 @@ _GLOBAL(_kvmppc_restore_tm_pr)
 	mfmsr	r5
 	SAVE_GPR(5, r1)

-	/* also save DSCR/CR so that it can be recovered later */
+	/* also save DSCR/CR/TAR so that it can be recovered later */
 	mfspr	r6, SPRN_DSCR
 	SAVE_GPR(6, r1)

 	mfcr	r7
 	stw	r7, _CCR(r1)

+	mfspr	r8, SPRN_TAR
+	SAVE_GPR(8, r1)
+
 	bl	__kvmppc_restore_tm

+	REST_GPR(8, r1)
+	mtspr	SPRN_TAR, r8
+
 	ld	r7, _CCR(r1)
 	mtcr	r7
...