Commit ec05ce11 authored by Paul Mackerras, committed by Sasha Levin

KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state

[ Upstream commit 0d808df0 ]

When switching from/to a guest that has a transaction in progress,
we need to save/restore the checkpointed register state.  Although
XER is part of the CPU state that gets checkpointed, the code that
does this saving and restoring doesn't save/restore XER.

This fixes it by saving and restoring the XER.  To allow userspace
to read/write the checkpointed XER value, we also add a new ONE_REG
specifier.

The visible effect of this bug is that the guest may see its XER
value being corrupted when it uses transactions.

Fixes: e4e38121 ("KVM: PPC: Book3S HV: Add transactional memory support")
Fixes: 0a8eccef ("KVM: PPC: Book3S HV: Add missing code for transaction reclaim on guest exit")
Cc: stable@vger.kernel.org # v3.15+
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
parent b1a75800
@@ -1955,6 +1955,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TM_VSCR           | 32
   PPC   | KVM_REG_PPC_TM_DSCR           | 64
   PPC   | KVM_REG_PPC_TM_TAR            | 64
+  PPC   | KVM_REG_PPC_TM_XER            | 64
         |                               |
   MIPS  | KVM_REG_MIPS_R0               | 64
...
@@ -532,6 +532,7 @@ struct kvm_vcpu_arch {
 	u64 tfiar;
 	u32 cr_tm;
+	u64 xer_tm;
 	u64 lr_tm;
 	u64 ctr_tm;
 	u64 amr_tm;
...
@@ -587,6 +587,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
 #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
 #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)

 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
...
@@ -583,6 +583,7 @@ int main(void)
 	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
 	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
 	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
 	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
 	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
 	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
...
@@ -1171,6 +1171,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		*val = get_reg_val(id, vcpu->arch.cr_tm);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		*val = get_reg_val(id, vcpu->arch.xer_tm);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		*val = get_reg_val(id, vcpu->arch.lr_tm);
 		break;
@@ -1378,6 +1381,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		vcpu->arch.cr_tm = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		vcpu->arch.xer_tm = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		vcpu->arch.lr_tm = set_reg_val(id, *val);
 		break;
...
@@ -2399,11 +2399,13 @@ kvmppc_save_tm:
 	mfctr	r7
 	mfspr	r8, SPRN_AMR
 	mfspr	r10, SPRN_TAR
+	mfxer	r11
 	std	r5, VCPU_LR_TM(r9)
 	stw	r6, VCPU_CR_TM(r9)
 	std	r7, VCPU_CTR_TM(r9)
 	std	r8, VCPU_AMR_TM(r9)
 	std	r10, VCPU_TAR_TM(r9)
+	std	r11, VCPU_XER_TM(r9)

 	/* Restore r12 as trap number. */
 	lwz	r12, VCPU_TRAP(r9)
@@ -2496,11 +2498,13 @@ kvmppc_restore_tm:
 	ld	r7, VCPU_CTR_TM(r4)
 	ld	r8, VCPU_AMR_TM(r4)
 	ld	r9, VCPU_TAR_TM(r4)
+	ld	r10, VCPU_XER_TM(r4)
 	mtlr	r5
 	mtcr	r6
 	mtctr	r7
 	mtspr	SPRN_AMR, r8
 	mtspr	SPRN_TAR, r9
+	mtxer	r10

 	/*
 	 * Load up PPR and DSCR values but don't put them in the actual SPRs
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment