Commit 173c520a authored by Simon Guo, committed by Paul Mackerras

KVM: PPC: Move nip/ctr/lr/xer registers to pt_regs in kvm_vcpu_arch

This patch moves the nip/ctr/lr/xer registers from scattered places in
kvm_vcpu_arch into the pt_regs structure.

The cr register is "unsigned long" in pt_regs but u32 in vcpu->arch,
so it needs more consideration and may be moved in a later patch.
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 1143a706
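
For orientation, here is a minimal, self-contained sketch of what the move looks like. The *_sketch/_before/_after type names and the get_pc() helper are illustrative stand-ins, not kernel code; only the field names (pc/ctr/lr/xer before, regs.nip/regs.ctr/regs.link/regs.xer after) are taken from the patch itself.

typedef unsigned long ulong;
typedef unsigned int u32;

/* Stand-in for the kernel's struct pt_regs; unrelated fields elided. */
struct pt_regs_sketch {
	ulong gpr[32];
	ulong nip;	/* next-instruction pointer, i.e. the guest PC */
	ulong ctr;	/* count register */
	ulong link;	/* link register (LR) */
	ulong xer;	/* fixed-point exception register */
};

/* Before this patch (abridged): loose per-register fields. */
struct vcpu_arch_before {
	ulong pc;
	ulong ctr;
	ulong lr;
	ulong xer;
	u32 cr;		/* u32 here, but "unsigned long" in pt_regs */
};

/* After this patch (abridged): one embedded register file; cr stays
 * behind because of the u32 vs. unsigned long mismatch noted above. */
struct vcpu_arch_after {
	struct pt_regs_sketch regs;
	u32 cr;
};

/* The kvmppc_get_xxx()/kvmppc_set_xxx() accessors keep their
 * signatures and simply retarget the embedded struct, e.g.: */
static inline ulong get_pc(struct vcpu_arch_after *arch)
{
	return arch->regs.nip;	/* was: arch->pc */
}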
@@ -295,42 +295,42 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.xer = val;
+	vcpu->arch.regs.xer = val;
 }
 
 static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.xer;
+	return vcpu->arch.regs.xer;
 }
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.ctr = val;
+	vcpu->arch.regs.ctr = val;
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.ctr;
+	return vcpu->arch.regs.ctr;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.lr = val;
+	vcpu->arch.regs.link = val;
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.lr;
+	return vcpu->arch.regs.link;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.pc = val;
+	vcpu->arch.regs.nip = val;
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.pc;
+	return vcpu->arch.regs.nip;
 }
 
 static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
...
@@ -483,9 +483,9 @@ static inline u64 sanitize_msr(u64 msr)
 static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.cr = vcpu->arch.cr_tm;
-	vcpu->arch.xer = vcpu->arch.xer_tm;
-	vcpu->arch.lr = vcpu->arch.lr_tm;
-	vcpu->arch.ctr = vcpu->arch.ctr_tm;
+	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
+	vcpu->arch.regs.link = vcpu->arch.lr_tm;
+	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
 	vcpu->arch.amr = vcpu->arch.amr_tm;
 	vcpu->arch.ppr = vcpu->arch.ppr_tm;
 	vcpu->arch.dscr = vcpu->arch.dscr_tm;
@@ -500,9 +500,9 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.cr_tm = vcpu->arch.cr;
-	vcpu->arch.xer_tm = vcpu->arch.xer;
-	vcpu->arch.lr_tm = vcpu->arch.lr;
-	vcpu->arch.ctr_tm = vcpu->arch.ctr;
+	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
+	vcpu->arch.lr_tm = vcpu->arch.regs.link;
+	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
 	vcpu->arch.amr_tm = vcpu->arch.amr;
 	vcpu->arch.ppr_tm = vcpu->arch.ppr;
 	vcpu->arch.dscr_tm = vcpu->arch.dscr;
...
@@ -56,12 +56,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.xer = val;
+	vcpu->arch.regs.xer = val;
 }
 
 static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.xer;
+	return vcpu->arch.regs.xer;
 }
 
 static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
@@ -72,32 +72,32 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.ctr = val;
+	vcpu->arch.regs.ctr = val;
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.ctr;
+	return vcpu->arch.regs.ctr;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.lr = val;
+	vcpu->arch.regs.link = val;
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.lr;
+	return vcpu->arch.regs.link;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.pc = val;
+	vcpu->arch.regs.nip = val;
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.pc;
+	return vcpu->arch.regs.nip;
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
...
@@ -521,14 +521,10 @@ struct kvm_vcpu_arch {
 	u32 qpr[32];
 #endif
 
-	ulong pc;
-	ulong ctr;
-	ulong lr;
 #ifdef CONFIG_PPC_BOOK3S
 	ulong tar;
 #endif
 
-	ulong xer;
 	u32 cr;
 
 #ifdef CONFIG_PPC_BOOK3S
...
@@ -431,14 +431,14 @@ int main(void)
 #ifdef CONFIG_ALTIVEC
 	OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
 #endif
-	OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
-	OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
-	OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
+	OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
+	OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+	OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
 #ifdef CONFIG_PPC_BOOK3S
 	OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
 #endif
 	OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
-	OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
+	OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
 	OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
@@ -695,10 +695,10 @@ int main(void)
 #else /* CONFIG_PPC_BOOK3S */
 	OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
-	OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
-	OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
-	OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
-	OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
+	OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
+	OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
+	OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+	OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
 	OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
 	OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
 	OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
...
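
The asm-offsets hunks above are what keep the assembly side of KVM in step: the VCPU_* symbols are constants generated from offsetof() expressions, so assembly that loads, say, VCPU_PC off the vcpu pointer picks up the register's new location automatically once the offsets are regenerated, with no edits to the .S files. A minimal sketch of that mechanism, again using hypothetical *_sketch types rather than the real kvm_vcpu:

#include <stddef.h>
#include <stdio.h>

struct pt_regs_sketch { unsigned long gpr[32], nip, ctr, link, xer; };
struct vcpu_arch_sketch { struct pt_regs_sketch regs; };
struct vcpu_sketch { int id; struct vcpu_arch_sketch arch; };

int main(void)
{
	/* OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip) in asm-offsets.c
	 * ultimately emits a constant like this for the assembler. */
	printf("VCPU_PC = %zu\n",
	       offsetof(struct vcpu_sketch, arch.regs.nip));
	return 0;
}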
@@ -52,7 +52,7 @@
 static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 {
 #ifdef DEBUG_MMU_PTE_IP
-	return vcpu->arch.pc == DEBUG_MMU_PTE_IP;
+	return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP;
 #else
 	return true;
 #endif
...
@@ -397,13 +397,13 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
 	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
-	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
+	       vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
 	for (r = 0; r < 16; ++r)
 		pr_err("r%2d = %.16lx r%d = %.16lx\n",
 		       r, kvmppc_get_gpr(vcpu, r),
 		       r+16, kvmppc_get_gpr(vcpu, r+16));
 	pr_err("ctr = %.16lx lr = %.16lx\n",
-	       vcpu->arch.ctr, vcpu->arch.lr);
+	       vcpu->arch.regs.ctr, vcpu->arch.regs.link);
 	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
 	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
 	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
@@ -411,7 +411,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
 	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
 	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
-	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
+	       vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
 	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
 	pr_err("fault dar = %.16lx dsisr = %.8x\n",
 	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
...
@@ -19,7 +19,7 @@ static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
 	u64 texasr, tfiar;
 	u64 msr = vcpu->arch.shregs.msr;
 
-	tfiar = vcpu->arch.pc & ~0x3ull;
+	tfiar = vcpu->arch.regs.nip & ~0x3ull;
 	texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
 	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
 		texasr |= TEXASR_SUSP;
@@ -57,8 +57,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 		       (newmsr & MSR_TM)));
 		newmsr = sanitize_msr(newmsr);
 		vcpu->arch.shregs.msr = newmsr;
-		vcpu->arch.cfar = vcpu->arch.pc - 4;
-		vcpu->arch.pc = vcpu->arch.shregs.srr0;
+		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
 		return RESUME_GUEST;
 
 	case PPC_INST_RFEBB:
@@ -90,8 +90,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 		vcpu->arch.bescr = bescr;
 		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
 		vcpu->arch.shregs.msr = msr;
-		vcpu->arch.cfar = vcpu->arch.pc - 4;
-		vcpu->arch.pc = vcpu->arch.ebbrr;
+		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+		vcpu->arch.regs.nip = vcpu->arch.ebbrr;
 		return RESUME_GUEST;
 
 	case PPC_INST_MTMSRD:
...
@@ -35,8 +35,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
 			return 0;
 		newmsr = sanitize_msr(newmsr);
 		vcpu->arch.shregs.msr = newmsr;
-		vcpu->arch.cfar = vcpu->arch.pc - 4;
-		vcpu->arch.pc = vcpu->arch.shregs.srr0;
+		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
 		return 1;
 
 	case PPC_INST_RFEBB:
@@ -58,8 +58,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
 		mtspr(SPRN_BESCR, bescr);
 		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
 		vcpu->arch.shregs.msr = msr;
-		vcpu->arch.cfar = vcpu->arch.pc - 4;
-		vcpu->arch.pc = mfspr(SPRN_EBBRR);
+		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+		vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
 		return 1;
 
 	case PPC_INST_MTMSRD:
@@ -103,7 +103,7 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
 void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.shregs.msr &= ~MSR_TS_MASK;	/* go to N state */
-	vcpu->arch.pc = vcpu->arch.tfhar;
+	vcpu->arch.regs.nip = vcpu->arch.tfhar;
 	copy_from_checkpoint(vcpu);
 	vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
 }
@@ -162,10 +162,10 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
 	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
 	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
 	svcpu->cr = vcpu->arch.cr;
-	svcpu->xer = vcpu->arch.xer;
-	svcpu->ctr = vcpu->arch.ctr;
-	svcpu->lr = vcpu->arch.lr;
-	svcpu->pc = vcpu->arch.pc;
+	svcpu->xer = vcpu->arch.regs.xer;
+	svcpu->ctr = vcpu->arch.regs.ctr;
+	svcpu->lr = vcpu->arch.regs.link;
+	svcpu->pc = vcpu->arch.regs.nip;
 #ifdef CONFIG_PPC_BOOK3S_64
 	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
 #endif
@@ -209,10 +209,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
 	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
 	vcpu->arch.cr = svcpu->cr;
-	vcpu->arch.xer = svcpu->xer;
-	vcpu->arch.ctr = svcpu->ctr;
-	vcpu->arch.lr = svcpu->lr;
-	vcpu->arch.pc = svcpu->pc;
+	vcpu->arch.regs.xer = svcpu->xer;
+	vcpu->arch.regs.ctr = svcpu->ctr;
+	vcpu->arch.regs.link = svcpu->lr;
+	vcpu->arch.regs.nip = svcpu->pc;
 	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
 	vcpu->arch.fault_dar = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
...
@@ -77,8 +77,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 {
 	int i;
 
-	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
-	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
+	printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
+			vcpu->arch.shared->msr);
+	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
+			vcpu->arch.regs.ctr);
 	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
 	       vcpu->arch.shared->srr1);
 
@@ -491,24 +493,25 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	if (allowed) {
 		switch (int_class) {
 		case INT_CLASS_NONCRIT:
-			set_guest_srr(vcpu, vcpu->arch.pc,
+			set_guest_srr(vcpu, vcpu->arch.regs.nip,
 				      vcpu->arch.shared->msr);
 			break;
 		case INT_CLASS_CRIT:
-			set_guest_csrr(vcpu, vcpu->arch.pc,
+			set_guest_csrr(vcpu, vcpu->arch.regs.nip,
 				       vcpu->arch.shared->msr);
 			break;
 		case INT_CLASS_DBG:
-			set_guest_dsrr(vcpu, vcpu->arch.pc,
+			set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
 				       vcpu->arch.shared->msr);
 			break;
 		case INT_CLASS_MC:
-			set_guest_mcsrr(vcpu, vcpu->arch.pc,
+			set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
 					vcpu->arch.shared->msr);
 			break;
 		}
 
-		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
+		vcpu->arch.regs.nip = vcpu->arch.ivpr |
+					vcpu->arch.ivor[priority];
 		if (update_esr == true)
 			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
 		if (update_dear == true)
@@ -826,7 +829,7 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case EMULATE_FAIL:
 		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+		       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
 		/* For debugging, encode the failing instruction and
 		 * report it to userspace. */
 		run->hw.hardware_exit_reason = ~0ULL << 32;
@@ -875,7 +878,7 @@ static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	 */
 	vcpu->arch.dbsr = 0;
 	run->debug.arch.status = 0;
-	run->debug.arch.address = vcpu->arch.pc;
+	run->debug.arch.address = vcpu->arch.regs.nip;
 
 	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
 		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
@@ -971,7 +974,7 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case EMULATE_FAIL:
 		pr_debug("%s: load instruction from guest address %lx failed\n",
-		       __func__, vcpu->arch.pc);
+		       __func__, vcpu->arch.regs.nip);
 		/* For debugging, encode the failing instruction and
 		 * report it to userspace. */
 		run->hw.hardware_exit_reason = ~0ULL << 32;
@@ -1169,7 +1172,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOKE_INTERRUPT_SPE_FP_DATA:
 	case BOOKE_INTERRUPT_SPE_FP_ROUND:
 		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
-		       __func__, exit_nr, vcpu->arch.pc);
+		       __func__, exit_nr, vcpu->arch.regs.nip);
 		run->hw.hardware_exit_reason = exit_nr;
 		r = RESUME_HOST;
 		break;
@@ -1299,7 +1302,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}
 
 	case BOOKE_INTERRUPT_ITLB_MISS: {
-		unsigned long eaddr = vcpu->arch.pc;
+		unsigned long eaddr = vcpu->arch.regs.nip;
 		gpa_t gpaddr;
 		gfn_t gfn;
 		int gtlb_index;
@@ -1391,7 +1394,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	int i;
 	int r;
 
-	vcpu->arch.pc = 0;
+	vcpu->arch.regs.nip = 0;
 	vcpu->arch.shared->pir = vcpu->vcpu_id;
 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 	kvmppc_set_msr(vcpu, 0);
@@ -1440,10 +1443,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
 	vcpu_load(vcpu);
 
-	regs->pc = vcpu->arch.pc;
+	regs->pc = vcpu->arch.regs.nip;
 	regs->cr = kvmppc_get_cr(vcpu);
-	regs->ctr = vcpu->arch.ctr;
-	regs->lr = vcpu->arch.lr;
+	regs->ctr = vcpu->arch.regs.ctr;
+	regs->lr = vcpu->arch.regs.link;
 	regs->xer = kvmppc_get_xer(vcpu);
 	regs->msr = vcpu->arch.shared->msr;
 	regs->srr0 = kvmppc_get_srr0(vcpu);
@@ -1471,10 +1474,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
 	vcpu_load(vcpu);
 
-	vcpu->arch.pc = regs->pc;
+	vcpu->arch.regs.nip = regs->pc;
 	kvmppc_set_cr(vcpu, regs->cr);
-	vcpu->arch.ctr = regs->ctr;
-	vcpu->arch.lr = regs->lr;
+	vcpu->arch.regs.ctr = regs->ctr;
+	vcpu->arch.regs.link = regs->lr;
 	kvmppc_set_xer(vcpu, regs->xer);
 	kvmppc_set_msr(vcpu, regs->msr);
 	kvmppc_set_srr0(vcpu, regs->srr0);
...
@@ -34,19 +34,19 @@
 
 static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.pc = vcpu->arch.shared->srr0;
+	vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
 	kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
 }
 
 static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.pc = vcpu->arch.dsrr0;
+	vcpu->arch.regs.nip = vcpu->arch.dsrr0;
 	kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
 }
 
 static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.pc = vcpu->arch.csrr0;
+	vcpu->arch.regs.nip = vcpu->arch.csrr0;
 	kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
 }
...
@@ -94,7 +94,7 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	switch (get_oc(inst)) {
 	case EHPRIV_OC_DEBUG:
 		run->exit_reason = KVM_EXIT_DEBUG;
-		run->debug.arch.address = vcpu->arch.pc;
+		run->debug.arch.address = vcpu->arch.regs.nip;
 		run->debug.arch.status = 0;
 		kvmppc_account_exit(vcpu, DEBUG_EXITS);
 		emulated = EMULATE_EXIT_USER;
...
@@ -513,7 +513,7 @@ void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
 {
 	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
-	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
+	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
 }
 
 void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
...