Commit 6f597c6b authored by Simon Guo's avatar Simon Guo Committed by Paul Mackerras

KVM: PPC: Book3S PR: Add guest MSR parameter for kvmppc_save_tm()/kvmppc_restore_tm()

HV KVM and PR KVM need different MSR sources to indicate whether
treclaim. or trecheckpoint. is necessary.

This patch adds a new parameter (guest MSR) to the kvmppc_save_tm()/
kvmppc_restore_tm() APIs:
- For HV KVM, it is VCPU_MSR
- For PR KVM, it is current host MSR or VCPU_SHADOW_SRR1

This enhancement enables these 2 APIs to be reused by PR KVM later.
And the patch keeps HV KVM logic unchanged.

This patch also reworks kvmppc_save_tm()/kvmppc_restore_tm() to
have a clean ABI: r3 for vcpu and r4 for guest_msr.

During kvmppc_save_tm()/kvmppc_restore_tm(), R1 needs to be saved
or restored. Currently R1 is saved into HSTATE_HOST_R1. In PR
KVM, we are going to add a C function wrapper for
kvmppc_save_tm()/kvmppc_restore_tm() in which R1 will be incremented
by an added stack frame and then saved into HSTATE_HOST_R1. There are
several places in HV KVM that load HSTATE_HOST_R1 into R1, and we don't
want the TM code to introduce risk or confusion there.

This patch therefore uses HSTATE_SCRATCH2 to save/restore R1 in
kvmppc_save_tm()/kvmppc_restore_tm() to avoid future confusion, since
r1 is really just a temporary/scratch value there to be saved/restored.

[paulus@ozlabs.org - rebased on top of 7b0e827c ("KVM: PPC: Book3S HV:
 Factor fake-suspend handling out of kvmppc_save/restore_tm", 2018-05-30)]
Signed-off-by: default avatarSimon Guo <wei.guo.simon@gmail.com>
Signed-off-by: default avatarPaul Mackerras <paulus@ozlabs.org>
parent 009c872a
...@@ -793,7 +793,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) ...@@ -793,7 +793,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/* /*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
*/ */
mr r3, r4
ld r4, VCPU_MSR(r3)
bl kvmppc_restore_tm_hv bl kvmppc_restore_tm_hv
ld r4, HSTATE_KVM_VCPU(r13)
91: 91:
#endif #endif
...@@ -1777,7 +1780,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) ...@@ -1777,7 +1780,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/* /*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
*/ */
mr r3, r9
ld r4, VCPU_MSR(r3)
bl kvmppc_save_tm_hv bl kvmppc_save_tm_hv
ld r9, HSTATE_KVM_VCPU(r13)
91: 91:
#endif #endif
...@@ -2680,7 +2686,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) ...@@ -2680,7 +2686,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/* /*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
*/ */
ld r9, HSTATE_KVM_VCPU(r13) ld r3, HSTATE_KVM_VCPU(r13)
ld r4, VCPU_MSR(r3)
bl kvmppc_save_tm_hv bl kvmppc_save_tm_hv
91: 91:
#endif #endif
...@@ -2799,7 +2806,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) ...@@ -2799,7 +2806,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/* /*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
*/ */
mr r3, r4
ld r4, VCPU_MSR(r3)
bl kvmppc_restore_tm_hv bl kvmppc_restore_tm_hv
ld r4, HSTATE_KVM_VCPU(r13)
91: 91:
#endif #endif
...@@ -3120,9 +3130,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) ...@@ -3120,9 +3130,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* /*
* Save transactional state and TM-related registers. * Save transactional state and TM-related registers.
* Called with r9 pointing to the vcpu struct. * Called with r3 pointing to the vcpu struct and r4 containing
* the guest MSR value.
* This can modify all checkpointed registers, but * This can modify all checkpointed registers, but
* restores r1, r2 and r9 (vcpu pointer) before exit. * restores r1 and r2 before exit.
*/ */
kvmppc_save_tm_hv: kvmppc_save_tm_hv:
/* See if we need to handle fake suspend mode */ /* See if we need to handle fake suspend mode */
...@@ -3205,9 +3216,10 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) ...@@ -3205,9 +3216,10 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
/* /*
* Restore transactional state and TM-related registers. * Restore transactional state and TM-related registers.
* Called with r4 pointing to the vcpu struct. * Called with r3 pointing to the vcpu struct
* and r4 containing the guest MSR value.
* This potentially modifies all checkpointed registers. * This potentially modifies all checkpointed registers.
* It restores r1, r2, r4 from the PACA. * It restores r1 and r2 from the PACA.
*/ */
kvmppc_restore_tm_hv: kvmppc_restore_tm_hv:
/* /*
...@@ -3234,15 +3246,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) ...@@ -3234,15 +3246,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
* The user may change these outside of a transaction, so they must * The user may change these outside of a transaction, so they must
* always be context switched. * always be context switched.
*/ */
ld r5, VCPU_TFHAR(r4) ld r5, VCPU_TFHAR(r3)
ld r6, VCPU_TFIAR(r4) ld r6, VCPU_TFIAR(r3)
ld r7, VCPU_TEXASR(r4) ld r7, VCPU_TEXASR(r3)
mtspr SPRN_TFHAR, r5 mtspr SPRN_TFHAR, r5
mtspr SPRN_TFIAR, r6 mtspr SPRN_TFIAR, r6
mtspr SPRN_TEXASR, r7 mtspr SPRN_TEXASR, r7
ld r5, VCPU_MSR(r4) rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
beqlr /* TM not active in guest */ beqlr /* TM not active in guest */
/* Make sure the failure summary is set */ /* Make sure the failure summary is set */
...@@ -3255,10 +3266,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) ...@@ -3255,10 +3266,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
b 9f /* and return */ b 9f /* and return */
10: stdu r1, -PPC_MIN_STKFRM(r1) 10: stdu r1, -PPC_MIN_STKFRM(r1)
/* guest is in transactional state, so simulate rollback */ /* guest is in transactional state, so simulate rollback */
mr r3, r4
bl kvmhv_emulate_tm_rollback bl kvmhv_emulate_tm_rollback
nop nop
ld r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */
addi r1, r1, PPC_MIN_STKFRM addi r1, r1, PPC_MIN_STKFRM
9: ld r0, PPC_LR_STKOFF(r1) 9: ld r0, PPC_LR_STKOFF(r1)
mtlr r0 mtlr r0
......
...@@ -26,9 +26,12 @@ ...@@ -26,9 +26,12 @@
/* /*
* Save transactional state and TM-related registers. * Save transactional state and TM-related registers.
* Called with r9 pointing to the vcpu struct. * Called with:
* - r3 pointing to the vcpu struct
* - r4 points to the MSR with current TS bits:
* (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR).
* This can modify all checkpointed registers, but * This can modify all checkpointed registers, but
* restores r1, r2 and r9 (vcpu pointer) before exit. * restores r1, r2 before exit.
*/ */
_GLOBAL(kvmppc_save_tm) _GLOBAL(kvmppc_save_tm)
mflr r0 mflr r0
...@@ -40,20 +43,17 @@ _GLOBAL(kvmppc_save_tm) ...@@ -40,20 +43,17 @@ _GLOBAL(kvmppc_save_tm)
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
mtmsrd r8 mtmsrd r8
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE rldicl. r4, r4, 64 - MSR_TS_S_LG, 62
ld r5, VCPU_MSR(r9)
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
beq 1f /* TM not active in guest. */ beq 1f /* TM not active in guest. */
#endif
std r1, HSTATE_HOST_R1(r13) std r1, HSTATE_SCRATCH2(r13)
li r3, TM_CAUSE_KVM_RESCHED std r3, HSTATE_SCRATCH1(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
/* Emulation of the treclaim instruction needs TEXASR before treclaim */ /* Emulation of the treclaim instruction needs TEXASR before treclaim */
mfspr r6, SPRN_TEXASR mfspr r6, SPRN_TEXASR
std r6, VCPU_ORIG_TEXASR(r9) std r6, VCPU_ORIG_TEXASR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
#endif #endif
...@@ -61,6 +61,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) ...@@ -61,6 +61,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
li r5, 0 li r5, 0
mtmsrd r5, 1 mtmsrd r5, 1
li r3, TM_CAUSE_KVM_RESCHED
/* All GPRs are volatile at this point. */ /* All GPRs are volatile at this point. */
TRECLAIM(R3) TRECLAIM(R3)
...@@ -68,9 +70,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) ...@@ -68,9 +70,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
SET_SCRATCH0(r13) SET_SCRATCH0(r13)
GET_PACA(r13) GET_PACA(r13)
std r9, PACATMSCRATCH(r13) std r9, PACATMSCRATCH(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE ld r9, HSTATE_SCRATCH1(r13)
ld r9, HSTATE_KVM_VCPU(r13)
#endif
/* Get a few more GPRs free. */ /* Get a few more GPRs free. */
std r29, VCPU_GPRS_TM(29)(r9) std r29, VCPU_GPRS_TM(29)(r9)
...@@ -102,7 +102,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) ...@@ -102,7 +102,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
std r4, VCPU_GPRS_TM(9)(r9) std r4, VCPU_GPRS_TM(9)(r9)
/* Reload stack pointer and TOC. */ /* Reload stack pointer and TOC. */
ld r1, HSTATE_HOST_R1(r13) ld r1, HSTATE_SCRATCH2(r13)
ld r2, PACATOC(r13) ld r2, PACATOC(r13)
/* Set MSR RI now we have r1 and r13 back. */ /* Set MSR RI now we have r1 and r13 back. */
...@@ -156,9 +156,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) ...@@ -156,9 +156,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
/* /*
* Restore transactional state and TM-related registers. * Restore transactional state and TM-related registers.
* Called with r4 pointing to the vcpu struct. * Called with:
* - r3 pointing to the vcpu struct.
* - r4 is the guest MSR with desired TS bits:
* For HV KVM, it is VCPU_MSR
* For PR KVM, it is provided by caller
* This potentially modifies all checkpointed registers. * This potentially modifies all checkpointed registers.
* It restores r1, r2, r4 from the PACA. * It restores r1, r2 from the PACA.
*/ */
_GLOBAL(kvmppc_restore_tm) _GLOBAL(kvmppc_restore_tm)
mflr r0 mflr r0
...@@ -177,19 +181,17 @@ _GLOBAL(kvmppc_restore_tm) ...@@ -177,19 +181,17 @@ _GLOBAL(kvmppc_restore_tm)
* The user may change these outside of a transaction, so they must * The user may change these outside of a transaction, so they must
* always be context switched. * always be context switched.
*/ */
ld r5, VCPU_TFHAR(r4) ld r5, VCPU_TFHAR(r3)
ld r6, VCPU_TFIAR(r4) ld r6, VCPU_TFIAR(r3)
ld r7, VCPU_TEXASR(r4) ld r7, VCPU_TEXASR(r3)
mtspr SPRN_TFHAR, r5 mtspr SPRN_TFHAR, r5
mtspr SPRN_TFIAR, r6 mtspr SPRN_TFIAR, r6
mtspr SPRN_TEXASR, r7 mtspr SPRN_TEXASR, r7
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE mr r5, r4
ld r5, VCPU_MSR(r4)
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
beqlr /* TM not active in guest */ beqlr /* TM not active in guest */
#endif std r1, HSTATE_SCRATCH2(r13)
std r1, HSTATE_HOST_R1(r13)
/* Make sure the failure summary is set, otherwise we'll program check /* Make sure the failure summary is set, otherwise we'll program check
* when we trechkpt. It's possible that this might have been not set * when we trechkpt. It's possible that this might have been not set
...@@ -205,21 +207,21 @@ _GLOBAL(kvmppc_restore_tm) ...@@ -205,21 +207,21 @@ _GLOBAL(kvmppc_restore_tm)
* some SPRs. * some SPRs.
*/ */
mr r31, r4 mr r31, r3
addi r3, r31, VCPU_FPRS_TM addi r3, r31, VCPU_FPRS_TM
bl load_fp_state bl load_fp_state
addi r3, r31, VCPU_VRS_TM addi r3, r31, VCPU_VRS_TM
bl load_vr_state bl load_vr_state
mr r4, r31 mr r3, r31
lwz r7, VCPU_VRSAVE_TM(r4) lwz r7, VCPU_VRSAVE_TM(r3)
mtspr SPRN_VRSAVE, r7 mtspr SPRN_VRSAVE, r7
ld r5, VCPU_LR_TM(r4) ld r5, VCPU_LR_TM(r3)
lwz r6, VCPU_CR_TM(r4) lwz r6, VCPU_CR_TM(r3)
ld r7, VCPU_CTR_TM(r4) ld r7, VCPU_CTR_TM(r3)
ld r8, VCPU_AMR_TM(r4) ld r8, VCPU_AMR_TM(r3)
ld r9, VCPU_TAR_TM(r4) ld r9, VCPU_TAR_TM(r3)
ld r10, VCPU_XER_TM(r4) ld r10, VCPU_XER_TM(r3)
mtlr r5 mtlr r5
mtcr r6 mtcr r6
mtctr r7 mtctr r7
...@@ -232,8 +234,8 @@ _GLOBAL(kvmppc_restore_tm) ...@@ -232,8 +234,8 @@ _GLOBAL(kvmppc_restore_tm)
* till the last moment to avoid running with userspace PPR and DSCR for * till the last moment to avoid running with userspace PPR and DSCR for
* too long. * too long.
*/ */
ld r29, VCPU_DSCR_TM(r4) ld r29, VCPU_DSCR_TM(r3)
ld r30, VCPU_PPR_TM(r4) ld r30, VCPU_PPR_TM(r3)
std r2, PACATMSCRATCH(r13) /* Save TOC */ std r2, PACATMSCRATCH(r13) /* Save TOC */
...@@ -265,9 +267,8 @@ _GLOBAL(kvmppc_restore_tm) ...@@ -265,9 +267,8 @@ _GLOBAL(kvmppc_restore_tm)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ld r29, HSTATE_DSCR(r13) ld r29, HSTATE_DSCR(r13)
mtspr SPRN_DSCR, r29 mtspr SPRN_DSCR, r29
ld r4, HSTATE_KVM_VCPU(r13)
#endif #endif
ld r1, HSTATE_HOST_R1(r13) ld r1, HSTATE_SCRATCH2(r13)
ld r2, PACATMSCRATCH(r13) ld r2, PACATMSCRATCH(r13)
/* Set the MSR RI since we have our registers back. */ /* Set the MSR RI since we have our registers back. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment