Commit 1143a706 authored by Simon Guo, committed by Paul Mackerras

KVM: PPC: Add pt_regs into kvm_vcpu_arch and move vcpu->arch.gpr[] into it

The registers are currently scattered across the kvm_vcpu_arch structure;
it is neater to gather them into a pt_regs structure.

This will also enable the MMIO emulation code to be reimplemented on top
of analyse_instr() later.
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 9c9e9cf4
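
The whole patch follows one mechanical pattern: the bare GPR array in kvm_vcpu_arch is replaced by an embedded struct pt_regs, and every access to vcpu->arch.gpr[...] becomes vcpu->arch.regs.gpr[...]. A minimal sketch of the before/after shape, with all unrelated fields elided (names taken from the hunks below):

	/* Before: GPRs are a bare array inside kvm_vcpu_arch */
	struct kvm_vcpu_arch {
		ulong gpr[32];
		/* ... other register fields ... */
	};

	/* After: GPRs live in an embedded pt_regs, so the vcpu register
	 * state can later be handed to analyse_instr() directly */
	struct kvm_vcpu_arch {
		struct pt_regs regs;	/* regs.gpr[] replaces gpr[] */
		/* ... other register fields ... */
	};

	/* Accessors are updated to match, e.g.: */
	static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
	{
		vcpu->arch.regs.gpr[num] = val;	/* was vcpu->arch.gpr[num] */
	}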
@@ -275,12 +275,12 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
-	vcpu->arch.gpr[num] = val;
+	vcpu->arch.regs.gpr[num] = val;
 }
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	return vcpu->arch.gpr[num];
+	return vcpu->arch.regs.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
@@ -490,8 +490,8 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 	vcpu->arch.ppr = vcpu->arch.ppr_tm;
 	vcpu->arch.dscr = vcpu->arch.dscr_tm;
 	vcpu->arch.tar = vcpu->arch.tar_tm;
-	memcpy(vcpu->arch.gpr, vcpu->arch.gpr_tm,
-	       sizeof(vcpu->arch.gpr));
+	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
+	       sizeof(vcpu->arch.regs.gpr));
 	vcpu->arch.fp = vcpu->arch.fp_tm;
 	vcpu->arch.vr = vcpu->arch.vr_tm;
 	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;

@@ -507,8 +507,8 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
 	vcpu->arch.ppr_tm = vcpu->arch.ppr;
 	vcpu->arch.dscr_tm = vcpu->arch.dscr;
 	vcpu->arch.tar_tm = vcpu->arch.tar;
-	memcpy(vcpu->arch.gpr_tm, vcpu->arch.gpr,
-	       sizeof(vcpu->arch.gpr));
+	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
+	       sizeof(vcpu->arch.regs.gpr));
 	vcpu->arch.fp_tm = vcpu->arch.fp;
 	vcpu->arch.vr_tm = vcpu->arch.vr;
 	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
@@ -36,12 +36,12 @@
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
-	vcpu->arch.gpr[num] = val;
+	vcpu->arch.regs.gpr[num] = val;
 }
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	return vcpu->arch.gpr[num];
+	return vcpu->arch.regs.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
@@ -486,7 +486,7 @@ struct kvm_vcpu_arch {
 	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 #endif
 
-	ulong gpr[32];
+	struct pt_regs regs;
 
 	struct thread_fp_state fp;
@@ -425,7 +425,7 @@ int main(void)
 	OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
 	OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
 	OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid);
-	OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr);
+	OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr);
 	OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave);
 	OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr);
 #ifdef CONFIG_ALTIVEC
@@ -609,7 +609,7 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 
 	page = stt->pages[idx / TCES_PER_PAGE];
 	tbl = (u64 *)page_address(page);
-	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
+	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
 
 	return H_SUCCESS;
 }
@@ -211,9 +211,9 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu)
 
 	/* Only need to do the expensive mfmsr() on radix */
 	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
-		r = powernv_get_random_long(&vcpu->arch.gpr[4]);
+		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
 	else
-		r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]);
+		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
 	if (r)
 		return H_SUCCESS;

@@ -562,7 +562,7 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
 {
 	if (!kvmppc_xics_enabled(vcpu))
 		return H_TOO_HARD;
-	vcpu->arch.gpr[5] = get_tb();
+	vcpu->arch.regs.gpr[5] = get_tb();
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_xirr(vcpu);
@@ -418,7 +418,8 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 		    long pte_index, unsigned long pteh, unsigned long ptel)
 {
 	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
-				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
+				 vcpu->arch.pgdir, true,
+				 &vcpu->arch.regs.gpr[4]);
 }
 
 #ifdef __BIG_ENDIAN__

@@ -561,13 +562,13 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
 		     unsigned long pte_index, unsigned long avpn)
 {
 	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
-				  &vcpu->arch.gpr[4]);
+				  &vcpu->arch.regs.gpr[4]);
 }
 
 long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	unsigned long *args = &vcpu->arch.gpr[4];
+	unsigned long *args = &vcpu->arch.regs.gpr[4];
 	__be64 *hp, *hptes[4];
 	unsigned long tlbrb[4];
 	long int i, j, k, n, found, indexes[4];

@@ -787,8 +788,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
 			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
 			r &= ~HPTE_GR_RESERVED;
 		}
-		vcpu->arch.gpr[4 + i * 2] = v;
-		vcpu->arch.gpr[5 + i * 2] = r;
+		vcpu->arch.regs.gpr[4 + i * 2] = v;
+		vcpu->arch.regs.gpr[5 + i * 2] = r;
 	}
 	return H_SUCCESS;
 }

@@ -834,7 +835,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
 			}
 		}
 	}
-	vcpu->arch.gpr[4] = gr;
+	vcpu->arch.regs.gpr[4] = gr;
 	ret = H_SUCCESS;
  out:
 	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);

@@ -881,7 +882,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 			kvmppc_set_dirty_from_hpte(kvm, v, gr);
 		}
 	}
-	vcpu->arch.gpr[4] = gr;
+	vcpu->arch.regs.gpr[4] = gr;
 	ret = H_SUCCESS;
  out:
 	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
@@ -517,7 +517,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
 	} while (!icp_rm_try_update(icp, old_state, new_state));
 
 	/* Return the result in GPR4 */
-	vcpu->arch.gpr[4] = xirr;
+	vcpu->arch.regs.gpr[4] = xirr;
 
 	return check_too_hard(xics, icp);
 }
@@ -147,20 +147,20 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 
-	svcpu->gpr[0] = vcpu->arch.gpr[0];
-	svcpu->gpr[1] = vcpu->arch.gpr[1];
-	svcpu->gpr[2] = vcpu->arch.gpr[2];
-	svcpu->gpr[3] = vcpu->arch.gpr[3];
-	svcpu->gpr[4] = vcpu->arch.gpr[4];
-	svcpu->gpr[5] = vcpu->arch.gpr[5];
-	svcpu->gpr[6] = vcpu->arch.gpr[6];
-	svcpu->gpr[7] = vcpu->arch.gpr[7];
-	svcpu->gpr[8] = vcpu->arch.gpr[8];
-	svcpu->gpr[9] = vcpu->arch.gpr[9];
-	svcpu->gpr[10] = vcpu->arch.gpr[10];
-	svcpu->gpr[11] = vcpu->arch.gpr[11];
-	svcpu->gpr[12] = vcpu->arch.gpr[12];
-	svcpu->gpr[13] = vcpu->arch.gpr[13];
+	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
+	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
+	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
+	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
+	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
+	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
+	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
+	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
+	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
+	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
+	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
+	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
+	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
+	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
 	svcpu->cr = vcpu->arch.cr;
 	svcpu->xer = vcpu->arch.xer;
 	svcpu->ctr = vcpu->arch.ctr;

@@ -194,20 +194,20 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
 	if (!svcpu->in_use)
 		goto out;
 
-	vcpu->arch.gpr[0] = svcpu->gpr[0];
-	vcpu->arch.gpr[1] = svcpu->gpr[1];
-	vcpu->arch.gpr[2] = svcpu->gpr[2];
-	vcpu->arch.gpr[3] = svcpu->gpr[3];
-	vcpu->arch.gpr[4] = svcpu->gpr[4];
-	vcpu->arch.gpr[5] = svcpu->gpr[5];
-	vcpu->arch.gpr[6] = svcpu->gpr[6];
-	vcpu->arch.gpr[7] = svcpu->gpr[7];
-	vcpu->arch.gpr[8] = svcpu->gpr[8];
-	vcpu->arch.gpr[9] = svcpu->gpr[9];
-	vcpu->arch.gpr[10] = svcpu->gpr[10];
-	vcpu->arch.gpr[11] = svcpu->gpr[11];
-	vcpu->arch.gpr[12] = svcpu->gpr[12];
-	vcpu->arch.gpr[13] = svcpu->gpr[13];
+	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
+	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
+	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
+	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
+	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
+	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
+	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
+	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
+	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
+	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
+	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
+	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
+	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
+	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
 	vcpu->arch.cr = svcpu->cr;
 	vcpu->arch.xer = svcpu->xer;
 	vcpu->arch.ctr = svcpu->ctr;
@@ -334,7 +334,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
 	 */
 
 	/* Return interrupt and old CPPR in GPR4 */
-	vcpu->arch.gpr[4] = hirq | (old_cppr << 24);
+	vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
 
 	return H_SUCCESS;
 }

@@ -369,7 +369,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long
 	hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);
 
 	/* Return interrupt and old CPPR in GPR4 */
-	vcpu->arch.gpr[4] = hirq | (xc->cppr << 24);
+	vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
 
 	return H_SUCCESS;
 }
@@ -53,7 +53,7 @@ static int dbell2prio(ulong param)
 
 static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
 {
-	ulong param = vcpu->arch.gpr[rb];
+	ulong param = vcpu->arch.regs.gpr[rb];
 	int prio = dbell2prio(param);
 
 	if (prio < 0)

@@ -65,7 +65,7 @@ static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
 
 static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
 {
-	ulong param = vcpu->arch.gpr[rb];
+	ulong param = vcpu->arch.regs.gpr[rb];
 	int prio = dbell2prio(rb);
 	int pir = param & PPC_DBELL_PIR_MASK;
 	int i;