Commit 09027ab7 authored by Paolo Bonzini

Merge tag 'kvm-ppc-next-4.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD

parents 273ba457 f61e0d3c
......@@ -134,7 +134,13 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
void pnv_power9_force_smt4_catch(void);
void pnv_power9_force_smt4_release(void);
/* Transaction memory related */
void tm_enable(void);
void tm_disable(void);
void tm_abort(uint8_t cause);
struct kvm_vcpu;
void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
......@@ -104,6 +104,7 @@ struct kvmppc_vcore {
ulong vtb; /* virtual timebase */
ulong conferring_threads;
unsigned int halt_poll_ns;
atomic_t online_count;
};
struct kvmppc_vcpu_book3s {
......@@ -209,6 +210,7 @@ extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
......@@ -256,6 +258,21 @@ extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
extern int kvm_irq_bypass;
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
......@@ -274,12 +291,12 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
vcpu->arch.regs.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
return vcpu->arch.gpr[num];
return vcpu->arch.regs.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
......@@ -294,42 +311,42 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.xer = val;
vcpu->arch.regs.xer = val;
}
static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
return vcpu->arch.regs.xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.ctr = val;
vcpu->arch.regs.ctr = val;
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.ctr;
return vcpu->arch.regs.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.lr = val;
vcpu->arch.regs.link = val;
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.lr;
return vcpu->arch.regs.link;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.pc = val;
vcpu->arch.regs.nip = val;
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
return vcpu->arch.pc;
return vcpu->arch.regs.nip;
}
static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
......
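Every accessor hunk in this header follows the same pattern: the loose per-vcpu register fields are folded into the kernel's standard struct pt_regs embedded in kvm_vcpu_arch (see the kvm_host.h hunk further down). A summary of the mapping, as a sketch derived from the hunks above:

/*
 * Field mapping used throughout this series (sketch, derived from the
 * accessor hunks above; struct pt_regs is the standard powerpc register
 * frame):
 *
 *   vcpu->arch.gpr[i]  ->  vcpu->arch.regs.gpr[i]
 *   vcpu->arch.pc      ->  vcpu->arch.regs.nip
 *   vcpu->arch.lr      ->  vcpu->arch.regs.link
 *   vcpu->arch.ctr     ->  vcpu->arch.regs.ctr
 *   vcpu->arch.xer     ->  vcpu->arch.regs.xer
 *
 * CR is not moved: it stays in vcpu->arch.cr in this series.
 */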
......@@ -483,15 +483,15 @@ static inline u64 sanitize_msr(u64 msr)
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
vcpu->arch.cr = vcpu->arch.cr_tm;
vcpu->arch.xer = vcpu->arch.xer_tm;
vcpu->arch.lr = vcpu->arch.lr_tm;
vcpu->arch.ctr = vcpu->arch.ctr_tm;
vcpu->arch.regs.xer = vcpu->arch.xer_tm;
vcpu->arch.regs.link = vcpu->arch.lr_tm;
vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
vcpu->arch.amr = vcpu->arch.amr_tm;
vcpu->arch.ppr = vcpu->arch.ppr_tm;
vcpu->arch.dscr = vcpu->arch.dscr_tm;
vcpu->arch.tar = vcpu->arch.tar_tm;
memcpy(vcpu->arch.gpr, vcpu->arch.gpr_tm,
sizeof(vcpu->arch.gpr));
memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
sizeof(vcpu->arch.regs.gpr));
vcpu->arch.fp = vcpu->arch.fp_tm;
vcpu->arch.vr = vcpu->arch.vr_tm;
vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
......@@ -500,15 +500,15 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
vcpu->arch.cr_tm = vcpu->arch.cr;
vcpu->arch.xer_tm = vcpu->arch.xer;
vcpu->arch.lr_tm = vcpu->arch.lr;
vcpu->arch.ctr_tm = vcpu->arch.ctr;
vcpu->arch.xer_tm = vcpu->arch.regs.xer;
vcpu->arch.lr_tm = vcpu->arch.regs.link;
vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
vcpu->arch.amr_tm = vcpu->arch.amr;
vcpu->arch.ppr_tm = vcpu->arch.ppr;
vcpu->arch.dscr_tm = vcpu->arch.dscr;
vcpu->arch.tar_tm = vcpu->arch.tar;
memcpy(vcpu->arch.gpr_tm, vcpu->arch.gpr,
sizeof(vcpu->arch.gpr));
memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
sizeof(vcpu->arch.regs.gpr));
vcpu->arch.fp_tm = vcpu->arch.fp;
vcpu->arch.vr_tm = vcpu->arch.vr;
vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
......
......@@ -36,12 +36,12 @@
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
vcpu->arch.regs.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
return vcpu->arch.gpr[num];
return vcpu->arch.regs.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
......@@ -56,12 +56,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.xer = val;
vcpu->arch.regs.xer = val;
}
static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
return vcpu->arch.regs.xer;
}
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
......@@ -72,32 +72,32 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.ctr = val;
vcpu->arch.regs.ctr = val;
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.ctr;
return vcpu->arch.regs.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.lr = val;
vcpu->arch.regs.link = val;
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.lr;
return vcpu->arch.regs.link;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.pc = val;
vcpu->arch.regs.nip = val;
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
return vcpu->arch.pc;
return vcpu->arch.regs.nip;
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
......
......@@ -269,7 +269,6 @@ struct kvm_arch {
unsigned long host_lpcr;
unsigned long sdr1;
unsigned long host_sdr1;
int tlbie_lock;
unsigned long lpcr;
unsigned long vrma_slb_v;
int mmu_ready;
......@@ -454,6 +453,12 @@ struct mmio_hpte_cache {
#define KVMPPC_VSX_COPY_WORD 1
#define KVMPPC_VSX_COPY_DWORD 2
#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3
#define KVMPPC_VSX_COPY_WORD_LOAD_DUMP 4
#define KVMPPC_VMX_COPY_BYTE 8
#define KVMPPC_VMX_COPY_HWORD 9
#define KVMPPC_VMX_COPY_WORD 10
#define KVMPPC_VMX_COPY_DWORD 11
struct openpic;
......@@ -486,7 +491,7 @@ struct kvm_vcpu_arch {
struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif
ulong gpr[32];
struct pt_regs regs;
struct thread_fp_state fp;
......@@ -521,14 +526,10 @@ struct kvm_vcpu_arch {
u32 qpr[32];
#endif
ulong pc;
ulong ctr;
ulong lr;
#ifdef CONFIG_PPC_BOOK3S
ulong tar;
#endif
ulong xer;
u32 cr;
#ifdef CONFIG_PPC_BOOK3S
......@@ -626,7 +627,6 @@ struct kvm_vcpu_arch {
struct thread_vr_state vr_tm;
u32 vrsave_tm; /* also USPRG0 */
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
......@@ -681,16 +681,17 @@ struct kvm_vcpu_arch {
* Number of simulations for vsx.
* If we use 2*8bytes to simulate 1*16bytes,
* then the number should be 2 and
* mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD.
* mmio_copy_type=KVMPPC_VSX_COPY_DWORD.
* If we use 4*4bytes to simulate 1*16bytes,
* the number should be 4 and
* mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
*/
u8 mmio_vsx_copy_nums;
u8 mmio_vsx_offset;
u8 mmio_vsx_copy_type;
u8 mmio_vsx_tx_sx_enabled;
u8 mmio_vmx_copy_nums;
u8 mmio_vmx_offset;
u8 mmio_copy_type;
u8 osi_needed;
u8 osi_enabled;
u8 papr_enabled;
......@@ -772,6 +773,8 @@ struct kvm_vcpu_arch {
u64 busy_preempt;
u32 emul_inst;
u32 online;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
......
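The comment on mmio_vsx_copy_nums above gives the two VSX cases; the new KVMPPC_VMX_COPY_* values let the same mmio_copy_type field describe VMX element sizes as well. A small helper sketch (an assumption for illustration, not code from this patch) of the arithmetic in that comment:

/* Hypothetical helper, only to restate the comment above: the number of
 * MMIO sub-accesses needed when one 16-byte VSX access is emulated with
 * equal elements of 'bytes' bytes each. */
static inline u8 mmio_copy_nums_sketch(unsigned int bytes)
{
	return 16 / bytes;	/* 8-byte dwords -> 2, 4-byte words -> 4 */
}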
......@@ -52,7 +52,7 @@ enum emulation_result {
EMULATE_EXIT_USER, /* emulation requires exit to user-space */
};
enum instruction_type {
enum instruction_fetch_type {
INST_GENERIC,
INST_SC, /* system call */
};
......@@ -81,10 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
......@@ -93,7 +93,7 @@ extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
int is_default_endian);
extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
enum instruction_type type, u32 *inst);
enum instruction_fetch_type type, u32 *inst);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data);
......@@ -265,6 +265,8 @@ union kvmppc_one_reg {
vector128 vval;
u64 vsxval[2];
u32 vsx32val[4];
u16 vsx16val[8];
u8 vsx8val[16];
struct {
u64 addr;
u64 length;
......@@ -324,13 +326,14 @@ struct kvmppc_ops {
int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
unsigned long flags);
void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;
static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
enum instruction_type type, u32 *inst)
enum instruction_fetch_type type, u32 *inst)
{
int ret = EMULATE_DONE;
u32 fetched_inst;
......
......@@ -385,6 +385,7 @@
#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
#define SPRN_PMCR 0x374 /* Power Management Control Register */
#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
/* HFSCR and FSCR bit numbers are the same */
#define FSCR_SCV_LG 12 /* Enable System Call Vectored */
......
......@@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
......
......@@ -426,20 +426,20 @@ int main(void)
OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid);
OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr);
OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr);
OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave);
OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr);
#ifdef CONFIG_ALTIVEC
OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
#endif
OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
#ifdef CONFIG_PPC_BOOK3S
OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
#endif
OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
......@@ -696,10 +696,10 @@ int main(void)
#else /* CONFIG_PPC_BOOK3S */
OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
......
......@@ -63,6 +63,9 @@ kvm-pr-y := \
book3s_64_mmu.o \
book3s_32_mmu.o
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
tm.o
ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_rmhandlers.o
......
......@@ -134,7 +134,7 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
kvmppc_unfixup_split_real(vcpu);
kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
vcpu->arch.mmu.reset_msr(vcpu);
}
......@@ -256,18 +256,15 @@ void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
{
kvmppc_set_dar(vcpu, dar);
kvmppc_set_dsisr(vcpu, flags);
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage); /* used by kvm_hv */
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
u64 msr = kvmppc_get_msr(vcpu);
msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
kvmppc_set_msr_fast(vcpu, msr);
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
unsigned int priority)
......@@ -450,8 +447,8 @@ int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
return r;
}
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
u32 *inst)
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
enum instruction_fetch_type type, u32 *inst)
{
ulong pc = kvmppc_get_pc(vcpu);
int r;
......@@ -509,8 +506,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
vcpu_load(vcpu);
regs->pc = kvmppc_get_pc(vcpu);
regs->cr = kvmppc_get_cr(vcpu);
regs->ctr = kvmppc_get_ctr(vcpu);
......@@ -532,7 +527,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
vcpu_put(vcpu);
return 0;
}
......@@ -540,8 +534,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
vcpu_load(vcpu);
kvmppc_set_pc(vcpu, regs->pc);
kvmppc_set_cr(vcpu, regs->cr);
kvmppc_set_ctr(vcpu, regs->ctr);
......@@ -562,7 +554,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
vcpu_put(vcpu);
return 0;
}
......
......@@ -31,4 +31,10 @@ extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
extern int kvmppc_book3s_init_pr(void);
extern void kvmppc_book3s_exit_pr(void);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val);
#else
static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {}
#endif
#endif
......@@ -52,7 +52,7 @@
static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
{
#ifdef DEBUG_MMU_PTE_IP
return vcpu->arch.pc == DEBUG_MMU_PTE_IP;
return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP;
#else
return true;
#endif
......
......@@ -38,7 +38,16 @@
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
unsigned long msr = vcpu->arch.intr_msr;
unsigned long cur_msr = kvmppc_get_msr(vcpu);
/* If transactional, change to suspend mode on IRQ delivery */
if (MSR_TM_TRANSACTIONAL(cur_msr))
msr |= MSR_TS_S;
else
msr |= cur_msr & MSR_TS_MASK;
kvmppc_set_msr(vcpu, msr);
}
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
......
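The reset_msr change above does what its comment says: an interrupt delivered while the guest is transactional lands in suspended state, and an existing suspended or non-transactional state is carried over unchanged. The same TS handling restated in plain C (a sketch mirroring the hunk, not extra patch code):

static unsigned long sketch_interrupt_msr_ts(unsigned long intr_msr,
					     unsigned long cur_msr)
{
	if (MSR_TM_TRANSACTIONAL(cur_msr))
		return intr_msr | MSR_TS_S;		   /* T -> S */
	return intr_msr | (cur_msr & MSR_TS_MASK);	   /* keep S or N */
}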
......@@ -272,6 +272,9 @@ int kvmppc_mmu_hv_init(void)
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EINVAL;
if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
return -EINVAL;
/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
host_lpid = mfspr(SPRN_LPID);
rsvd_lpid = LPID_RSVD;
......
......@@ -176,14 +176,12 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if (!tbltmp)
continue;
/*
* Make sure hardware table parameters are exactly the same;
* this is used in the TCE handlers where boundary checks
* use only the first attached table.
*/
if ((tbltmp->it_page_shift == stt->page_shift) &&
(tbltmp->it_offset == stt->offset) &&
(tbltmp->it_size == stt->size)) {
/* Make sure hardware table parameters are compatible */
if ((tbltmp->it_page_shift <= stt->page_shift) &&
(tbltmp->it_offset << tbltmp->it_page_shift ==
stt->offset << stt->page_shift) &&
(tbltmp->it_size << tbltmp->it_page_shift ==
stt->size << stt->page_shift)) {
/*
* Reference the table to avoid races with
* add/remove DMA windows.
......@@ -237,7 +235,7 @@ static void release_spapr_tce_table(struct rcu_head *head)
kfree(stt);
}
static int kvm_spapr_tce_fault(struct vm_fault *vmf)
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
struct page *page;
......@@ -302,7 +300,8 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
int ret = -ENOMEM;
int i;
if (!args->size)
if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
return -EINVAL;
size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
......@@ -396,7 +395,7 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
return H_SUCCESS;
}
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
struct iommu_table *tbl, unsigned long entry)
{
enum dma_data_direction dir = DMA_NONE;
......@@ -416,7 +415,24 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
return ret;
}
long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
unsigned long entry)
{
unsigned long i, ret = H_SUCCESS;
unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
unsigned long io_entry = entry * subpages;
for (i = 0; i < subpages; ++i) {
ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
if (ret != H_SUCCESS)
break;
}
return ret;
}
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
unsigned long entry, unsigned long ua,
enum dma_data_direction dir)
{
......@@ -453,6 +469,27 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
return 0;
}
static long kvmppc_tce_iommu_map(struct kvm *kvm,
struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
unsigned long entry, unsigned long ua,
enum dma_data_direction dir)
{
unsigned long i, pgoff, ret = H_SUCCESS;
unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
unsigned long io_entry = entry * subpages;
for (i = 0, pgoff = 0; i < subpages;
++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
ret = kvmppc_tce_iommu_do_map(kvm, tbl,
io_entry + i, ua + pgoff, dir);
if (ret != H_SUCCESS)
break;
}
return ret;
}
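Both wrappers above (and their realmode counterparts later in this diff) fan one guest TCE entry out over several hardware IOMMU entries whenever the guest window uses a larger page size than the hardware table. A worked example, assuming 64K guest TCE pages over a 4K hardware table:

/*
 * Worked example of the subpages fan-out (assumed page sizes):
 *   stt->page_shift     = 16   (64K guest TCE page)
 *   tbl->it_page_shift  = 12   (4K hardware IOMMU page)
 *   subpages            = 1ULL << (16 - 12) = 16
 * Guest entry N covers hardware entries N*16 .. N*16 + 15, and ua
 * advances by IOMMU_PAGE_SIZE(tbl) (4K here) per hardware entry.
 */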
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
......@@ -491,10 +528,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
if (dir == DMA_NONE)
ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
stit->tbl, entry);
else
ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
entry, ua, dir);
if (ret == H_SUCCESS)
......@@ -570,7 +607,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
ret = kvmppc_tce_iommu_map(vcpu->kvm,
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry + i, ua,
iommu_tce_direction(tce));
......@@ -615,10 +652,10 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
unsigned long entry = ioba >> stit->tbl->it_page_shift;
unsigned long entry = ioba >> stt->page_shift;
for (i = 0; i < npages; ++i) {
ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
stit->tbl, entry + i);
if (ret == H_SUCCESS)
......
......@@ -221,7 +221,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
return H_SUCCESS;
}
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
struct iommu_table *tbl, unsigned long entry)
{
enum dma_data_direction dir = DMA_NONE;
......@@ -245,7 +245,24 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
return ret;
}
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
unsigned long entry)
{
unsigned long i, ret = H_SUCCESS;
unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
unsigned long io_entry = entry * subpages;
for (i = 0; i < subpages; ++i) {
ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
if (ret != H_SUCCESS)
break;
}
return ret;
}
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
unsigned long entry, unsigned long ua,
enum dma_data_direction dir)
{
......@@ -290,6 +307,27 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
return 0;
}
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
unsigned long entry, unsigned long ua,
enum dma_data_direction dir)
{
unsigned long i, pgoff, ret = H_SUCCESS;
unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
unsigned long io_entry = entry * subpages;
for (i = 0, pgoff = 0; i < subpages;
++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
io_entry + i, ua + pgoff, dir);
if (ret != H_SUCCESS)
break;
}
return ret;
}
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
......@@ -327,10 +365,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
if (dir == DMA_NONE)
ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
stit->tbl, entry);
else
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry, ua, dir);
if (ret == H_SUCCESS)
......@@ -477,7 +515,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry + i, ua,
iommu_tce_direction(tce));
......@@ -526,10 +564,10 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
unsigned long entry = ioba >> stit->tbl->it_page_shift;
unsigned long entry = ioba >> stt->page_shift;
for (i = 0; i < npages; ++i) {
ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
stit->tbl, entry + i);
if (ret == H_SUCCESS)
......@@ -571,7 +609,7 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
page = stt->pages[idx / TCES_PER_PAGE];
tbl = (u64 *)page_address(page);
vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
return H_SUCCESS;
}
......
......@@ -123,6 +123,32 @@ static bool no_mixing_hpt_and_radix;
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
/*
* RWMR values for POWER8. These control the rate at which PURR
* and SPURR count and should be set according to the number of
* online threads in the vcore being run.
*/
#define RWMR_RPA_P8_1THREAD 0x164520C62609AECA
#define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9
#define RWMR_RPA_P8_3THREAD 0x164520C62609AECA
#define RWMR_RPA_P8_4THREAD 0x199A421245058DA9
#define RWMR_RPA_P8_5THREAD 0x164520C62609AECA
#define RWMR_RPA_P8_6THREAD 0x164520C62609AECA
#define RWMR_RPA_P8_7THREAD 0x164520C62609AECA
#define RWMR_RPA_P8_8THREAD 0x164520C62609AECA
static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
RWMR_RPA_P8_1THREAD,
RWMR_RPA_P8_1THREAD,
RWMR_RPA_P8_2THREAD,
RWMR_RPA_P8_3THREAD,
RWMR_RPA_P8_4THREAD,
RWMR_RPA_P8_5THREAD,
RWMR_RPA_P8_6THREAD,
RWMR_RPA_P8_7THREAD,
RWMR_RPA_P8_8THREAD,
};
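The table is indexed by the number of online vcpus in the vcore; kvmppc_run_core() (further down in this diff) falls back to the 8-thread value when the count is out of range or split-core mode is in use. A simplified lookup sketch (the split-core and threads_per_subcore checks from the real hunk are omitted here):

static unsigned long sketch_p8_rwmr(int n_online)
{
	if (n_online < 1 || n_online > MAX_SMT_THREADS)
		return RWMR_RPA_P8_8THREAD;	/* defensive default */
	return p8_rwmr_values[n_online];
}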
static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
int *ip)
{
......@@ -371,13 +397,13 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
for (r = 0; r < 16; ++r)
pr_err("r%2d = %.16lx r%d = %.16lx\n",
r, kvmppc_get_gpr(vcpu, r),
r+16, kvmppc_get_gpr(vcpu, r+16));
pr_err("ctr = %.16lx lr = %.16lx\n",
vcpu->arch.ctr, vcpu->arch.lr);
vcpu->arch.regs.ctr, vcpu->arch.regs.link);
pr_err("srr0 = %.16llx srr1 = %.16llx\n",
vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
......@@ -385,7 +411,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
pr_err("fault dar = %.16lx dsisr = %.8x\n",
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
......@@ -1526,6 +1552,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.dec_expires +
vcpu->arch.vcore->tb_offset);
break;
case KVM_REG_PPC_ONLINE:
*val = get_reg_val(id, vcpu->arch.online);
break;
default:
r = -EINVAL;
break;
......@@ -1757,6 +1786,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.dec_expires = set_reg_val(id, *val) -
vcpu->arch.vcore->tb_offset;
break;
case KVM_REG_PPC_ONLINE:
i = set_reg_val(id, *val);
if (i && !vcpu->arch.online)
atomic_inc(&vcpu->arch.vcore->online_count);
else if (!i && vcpu->arch.online)
atomic_dec(&vcpu->arch.vcore->online_count);
vcpu->arch.online = i;
break;
default:
r = -EINVAL;
break;
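KVM_REG_PPC_ONLINE is exported to userspace in the uapi hunk above; these handlers keep the vcore's online_count in step with it. A userspace-side sketch of setting the register (not part of this patch; assumes a vcpu fd obtained via KVM_CREATE_VCPU):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_vcpu_online(int vcpu_fd, uint32_t online)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_ONLINE,
		.addr = (uintptr_t)&online,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}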
......@@ -2850,6 +2887,25 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
}
}
/*
* On POWER8, set RWMR register.
* Since it only affects PURR and SPURR, it doesn't affect
* the host, so we don't save/restore the host value.
*/
if (is_power8) {
unsigned long rwmr_val = RWMR_RPA_P8_8THREAD;
int n_online = atomic_read(&vc->online_count);
/*
* Use the 8-thread value if we're doing split-core
* or if the vcore's online count looks bogus.
*/
if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
n_online >= 1 && n_online <= MAX_SMT_THREADS)
rwmr_val = p8_rwmr_values[n_online];
mtspr(SPRN_RWMR, rwmr_val);
}
/* Start all the threads */
active = 0;
for (sub = 0; sub < core_info.n_subcores; ++sub) {
......@@ -2902,6 +2958,32 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
for (sub = 0; sub < core_info.n_subcores; ++sub)
spin_unlock(&core_info.vc[sub]->lock);
if (kvm_is_radix(vc->kvm)) {
int tmp = pcpu;
/*
* Do we need to flush the process scoped TLB for the LPAR?
*
* On POWER9, individual threads can come in here, but the
* TLB is shared between the 4 threads in a core, hence
* invalidating on one thread invalidates for all.
* Thus we make all 4 threads use the same bit here.
*
* Hash must be flushed in realmode in order to use tlbiel.
*/
mtspr(SPRN_LPID, vc->kvm->arch.lpid);
isync();
if (cpu_has_feature(CPU_FTR_ARCH_300))
tmp &= ~0x3UL;
if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) {
radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid);
/* Clear the bit after the TLB flush */
cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush);
}
}
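The thread-folding above is easiest to see with a concrete CPU number (an assumed value, for illustration only):

/*
 * Example: on POWER9 (CPU_FTR_ARCH_300), pcpu 37 belongs to the core
 * whose first thread is 36, so tmp &= ~0x3UL gives 36.  All four
 * threads of that core therefore test and clear the same bit in
 * need_tlb_flush, and one tlbiel-based flush serves the whole core.
 */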
/*
* Interrupts will be enabled once we get into the guest,
* so tell lockdep that we're about to enable interrupts.
......@@ -3356,6 +3438,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
#endif
/*
* Force online to 1 for the sake of old userspace which doesn't
* set it.
*/
if (!vcpu->arch.online) {
atomic_inc(&vcpu->arch.vcore->online_count);
vcpu->arch.online = 1;
}
kvmppc_core_prepare_to_enter(vcpu);
/* No need to go into the guest when all we'll do is come back out */
......
......@@ -18,6 +18,7 @@
#include <linux/cma.h>
#include <linux/bitops.h>
#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
......@@ -211,9 +212,9 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu)
/* Only need to do the expensive mfmsr() on radix */
if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
r = powernv_get_random_long(&vcpu->arch.gpr[4]);
r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
else
r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]);
r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
if (r)
return H_SUCCESS;
......@@ -562,7 +563,7 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
vcpu->arch.gpr[5] = get_tb();
vcpu->arch.regs.gpr[5] = get_tb();
if (xive_enabled()) {
if (is_rm())
return xive_rm_h_xirr(vcpu);
......@@ -633,7 +634,19 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
void kvmppc_bad_interrupt(struct pt_regs *regs)
{
die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
/*
* 0x100 (system reset) can happen at any time; 0x200 (machine check) can
* happen due to an invalid real address access, for example, or at any
* time due to a hardware problem.
*/
if (TRAP(regs) == 0x100) {
get_paca()->in_nmi++;
system_reset_exception(regs);
get_paca()->in_nmi--;
} else if (TRAP(regs) == 0x200) {
machine_check_exception(regs);
} else {
die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
}
panic("Bad KVM trap");
}
......
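For reference, the dispatch added above boils down to the following (the vector numbers are the standard powerpc exception vectors):

/*
 *   TRAP(regs) == 0x100  ->  system_reset_exception(), bracketed by
 *                            get_paca()->in_nmi accounting
 *   TRAP(regs) == 0x200  ->  machine_check_exception()
 *   anything else        ->  die("Bad interrupt in KVM entry/exit code")
 */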
......@@ -137,7 +137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
/*
* We return here in virtual mode after the guest exits
* with something that we can't handle in real mode.
* Interrupts are enabled again at this point.
* Interrupts are still hard-disabled.
*/
/*
......
......@@ -418,7 +418,8 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel)
{
return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
vcpu->arch.pgdir, true,
&vcpu->arch.regs.gpr[4]);
}
#ifdef __BIG_ENDIAN__
......@@ -434,24 +435,6 @@ static inline int is_mmio_hpte(unsigned long v, unsigned long r)
(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}
static inline int try_lock_tlbie(unsigned int *lock)
{
unsigned int tmp, old;
unsigned int token = LOCK_TOKEN;
asm volatile("1:lwarx %1,0,%2\n"
" cmpwi cr0,%1,0\n"
" bne 2f\n"
" stwcx. %3,0,%2\n"
" bne- 1b\n"
" isync\n"
"2:"
: "=&r" (tmp), "=&r" (old)
: "r" (lock), "r" (token)
: "cc", "memory");
return old == 0;
}
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
long npages, int global, bool need_sync)
{
......@@ -463,8 +446,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
* the RS field, this is backwards-compatible with P7 and P8.
*/
if (global) {
while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
cpu_relax();
if (need_sync)
asm volatile("ptesync" : : : "memory");
for (i = 0; i < npages; ++i) {
......@@ -483,7 +464,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
}
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
kvm->arch.tlbie_lock = 0;
} else {
if (need_sync)
asm volatile("ptesync" : : : "memory");
......@@ -561,13 +541,13 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index, unsigned long avpn)
{
return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
&vcpu->arch.gpr[4]);
&vcpu->arch.regs.gpr[4]);
}
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
unsigned long *args = &vcpu->arch.gpr[4];
unsigned long *args = &vcpu->arch.regs.gpr[4];
__be64 *hp, *hptes[4];
unsigned long tlbrb[4];
long int i, j, k, n, found, indexes[4];
......@@ -787,8 +767,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
r &= ~HPTE_GR_RESERVED;
}
vcpu->arch.gpr[4 + i * 2] = v;
vcpu->arch.gpr[5 + i * 2] = r;
vcpu->arch.regs.gpr[4 + i * 2] = v;
vcpu->arch.regs.gpr[5 + i * 2] = r;
}
return H_SUCCESS;
}
......@@ -834,7 +814,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
}
}
}
vcpu->arch.gpr[4] = gr;
vcpu->arch.regs.gpr[4] = gr;
ret = H_SUCCESS;
out:
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
......@@ -881,7 +861,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
kvmppc_set_dirty_from_hpte(kvm, v, gr);
}
}
vcpu->arch.gpr[4] = gr;
vcpu->arch.regs.gpr[4] = gr;
ret = H_SUCCESS;
out:
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
......
......@@ -517,7 +517,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
} while (!icp_rm_try_update(icp, old_state, new_state));
/* Return the result in GPR4 */
vcpu->arch.gpr[4] = xirr;
vcpu->arch.regs.gpr[4] = xirr;
return check_too_hard(xics, icp);
}
......
......@@ -19,7 +19,7 @@ static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
u64 texasr, tfiar;
u64 msr = vcpu->arch.shregs.msr;
tfiar = vcpu->arch.pc & ~0x3ull;
tfiar = vcpu->arch.regs.nip & ~0x3ull;
texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
texasr |= TEXASR_SUSP;
......@@ -57,8 +57,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
(newmsr & MSR_TM)));
newmsr = sanitize_msr(newmsr);
vcpu->arch.shregs.msr = newmsr;
vcpu->arch.cfar = vcpu->arch.pc - 4;
vcpu->arch.pc = vcpu->arch.shregs.srr0;
vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
return RESUME_GUEST;
case PPC_INST_RFEBB:
......@@ -90,8 +90,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu->arch.bescr = bescr;
msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
vcpu->arch.shregs.msr = msr;
vcpu->arch.cfar = vcpu->arch.pc - 4;
vcpu->arch.pc = vcpu->arch.ebbrr;
vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
vcpu->arch.regs.nip = vcpu->arch.ebbrr;
return RESUME_GUEST;
case PPC_INST_MTMSRD:
......
......@@ -35,8 +35,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
return 0;
newmsr = sanitize_msr(newmsr);
vcpu->arch.shregs.msr = newmsr;
vcpu->arch.cfar = vcpu->arch.pc - 4;
vcpu->arch.pc = vcpu->arch.shregs.srr0;
vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
return 1;
case PPC_INST_RFEBB:
......@@ -58,8 +58,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
mtspr(SPRN_BESCR, bescr);
msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
vcpu->arch.shregs.msr = msr;
vcpu->arch.cfar = vcpu->arch.pc - 4;
vcpu->arch.pc = mfspr(SPRN_EBBRR);
vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
return 1;
case PPC_INST_MTMSRD:
......@@ -103,7 +103,7 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
{
vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */
vcpu->arch.pc = vcpu->arch.tfhar;
vcpu->arch.regs.nip = vcpu->arch.tfhar;
copy_from_checkpoint(vcpu);
vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
}
......@@ -383,6 +383,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
*/
PPC_LL r6, HSTATE_HOST_MSR(r13)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* We don't want the rfi below to change the MSR[TS] bits.
* The actual TM handling happens in the host, with DR/IR
* recovered, after HSTATE_VMHANDLER.  Since MSR_TM can be
* set in HOST_MSR, rfid would not suppress such a transition
* and could raise an exception.  So manually carry the
* current TS state over into the MSR being loaded here.
*/
mfmsr r7
rldicl r7, r7, 64 - MSR_TS_S_LG, 62
rldimi r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG
#endif
PPC_LL r8, HSTATE_VMHANDLER(r13)
#ifdef CONFIG_PPC64
......
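The rldicl/rldimi pair above copies the live MSR[TS] field into the MSR image that rfid will load. An equivalent C sketch (an illustration, not patch code):

static inline unsigned long sketch_preserve_ts(unsigned long host_msr,
					       unsigned long cur_msr)
{
	host_msr &= ~MSR_TS_MASK;		   /* drop TS from the image */
	return host_msr | (cur_msr & MSR_TS_MASK); /* keep the live TS state */
}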
......@@ -334,7 +334,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
*/
/* Return interrupt and old CPPR in GPR4 */
vcpu->arch.gpr[4] = hirq | (old_cppr << 24);
vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
return H_SUCCESS;
}
......@@ -369,7 +369,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long
hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);
/* Return interrupt and old CPPR in GPR4 */
vcpu->arch.gpr[4] = hirq | (xc->cppr << 24);
vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
return H_SUCCESS;
}
......
......@@ -77,8 +77,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
int i;
printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
vcpu->arch.shared->msr);
printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
vcpu->arch.regs.ctr);
printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
vcpu->arch.shared->srr1);
......@@ -491,24 +493,25 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
if (allowed) {
switch (int_class) {
case INT_CLASS_NONCRIT:
set_guest_srr(vcpu, vcpu->arch.pc,
set_guest_srr(vcpu, vcpu->arch.regs.nip,
vcpu->arch.shared->msr);
break;
case INT_CLASS_CRIT:
set_guest_csrr(vcpu, vcpu->arch.pc,
set_guest_csrr(vcpu, vcpu->arch.regs.nip,
vcpu->arch.shared->msr);
break;
case INT_CLASS_DBG:
set_guest_dsrr(vcpu, vcpu->arch.pc,
set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
vcpu->arch.shared->msr);
break;
case INT_CLASS_MC:
set_guest_mcsrr(vcpu, vcpu->arch.pc,
set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
vcpu->arch.shared->msr);
break;
}
vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
vcpu->arch.regs.nip = vcpu->arch.ivpr |
vcpu->arch.ivor[priority];
if (update_esr == true)
kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
if (update_dear == true)
......@@ -826,7 +829,7 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
case EMULATE_FAIL:
printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
__func__, vcpu->arch.pc, vcpu->arch.last_inst);
__func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
/* For debugging, encode the failing instruction and
* report it to userspace. */
run->hw.hardware_exit_reason = ~0ULL << 32;
......@@ -875,7 +878,7 @@ static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
*/
vcpu->arch.dbsr = 0;
run->debug.arch.status = 0;
run->debug.arch.address = vcpu->arch.pc;
run->debug.arch.address = vcpu->arch.regs.nip;
if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
......@@ -971,7 +974,7 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
case EMULATE_FAIL:
pr_debug("%s: load instruction from guest address %lx failed\n",
__func__, vcpu->arch.pc);
__func__, vcpu->arch.regs.nip);
/* For debugging, encode the failing instruction and
* report it to userspace. */
run->hw.hardware_exit_reason = ~0ULL << 32;
......@@ -1169,7 +1172,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOKE_INTERRUPT_SPE_FP_DATA:
case BOOKE_INTERRUPT_SPE_FP_ROUND:
printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
__func__, exit_nr, vcpu->arch.pc);
__func__, exit_nr, vcpu->arch.regs.nip);
run->hw.hardware_exit_reason = exit_nr;
r = RESUME_HOST;
break;
......@@ -1299,7 +1302,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
case BOOKE_INTERRUPT_ITLB_MISS: {
unsigned long eaddr = vcpu->arch.pc;
unsigned long eaddr = vcpu->arch.regs.nip;
gpa_t gpaddr;
gfn_t gfn;
int gtlb_index;
......@@ -1391,7 +1394,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
int i;
int r;
vcpu->arch.pc = 0;
vcpu->arch.regs.nip = 0;
vcpu->arch.shared->pir = vcpu->vcpu_id;
kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
kvmppc_set_msr(vcpu, 0);
......@@ -1440,10 +1443,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu_load(vcpu);
regs->pc = vcpu->arch.pc;
regs->pc = vcpu->arch.regs.nip;
regs->cr = kvmppc_get_cr(vcpu);
regs->ctr = vcpu->arch.ctr;
regs->lr = vcpu->arch.lr;
regs->ctr = vcpu->arch.regs.ctr;
regs->lr = vcpu->arch.regs.link;
regs->xer = kvmppc_get_xer(vcpu);
regs->msr = vcpu->arch.shared->msr;
regs->srr0 = kvmppc_get_srr0(vcpu);
......@@ -1471,10 +1474,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu_load(vcpu);
vcpu->arch.pc = regs->pc;
vcpu->arch.regs.nip = regs->pc;
kvmppc_set_cr(vcpu, regs->cr);
vcpu->arch.ctr = regs->ctr;
vcpu->arch.lr = regs->lr;
vcpu->arch.regs.ctr = regs->ctr;
vcpu->arch.regs.link = regs->lr;
kvmppc_set_xer(vcpu, regs->xer);
kvmppc_set_msr(vcpu, regs->msr);
kvmppc_set_srr0(vcpu, regs->srr0);
......
......@@ -34,19 +34,19 @@
static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = vcpu->arch.shared->srr0;
vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
}
static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = vcpu->arch.dsrr0;
vcpu->arch.regs.nip = vcpu->arch.dsrr0;
kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
}
static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = vcpu->arch.csrr0;
vcpu->arch.regs.nip = vcpu->arch.csrr0;
kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
}
......
......@@ -53,7 +53,7 @@ static int dbell2prio(ulong param)
static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
{
ulong param = vcpu->arch.gpr[rb];
ulong param = vcpu->arch.regs.gpr[rb];
int prio = dbell2prio(param);
if (prio < 0)
......@@ -65,7 +65,7 @@ static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
{
ulong param = vcpu->arch.gpr[rb];
ulong param = vcpu->arch.regs.gpr[rb];
int prio = dbell2prio(rb);
int pir = param & PPC_DBELL_PIR_MASK;
int i;
......@@ -94,7 +94,7 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (get_oc(inst)) {
case EHPRIV_OC_DEBUG:
run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.address = vcpu->arch.pc;
run->debug.arch.address = vcpu->arch.regs.nip;
run->debug.arch.status = 0;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
emulated = EMULATE_EXIT_USER;
......
......@@ -513,7 +513,7 @@ void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
}
void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
......
......@@ -625,8 +625,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
}
#ifdef CONFIG_KVM_BOOKE_HV
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
u32 *instr)
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
enum instruction_fetch_type type, u32 *instr)
{
gva_t geaddr;
hpa_t addr;
......@@ -715,8 +715,8 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
return EMULATE_DONE;
}
#else
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
u32 *instr)
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
enum instruction_fetch_type type, u32 *instr)
{
return EMULATE_AGAIN;
}
......
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Derived from book3s_hv_rmhandlers.S, which is:
*
* Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
*/
#include <asm/reg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/cputable.h>
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
/*
* Save transactional state and TM-related registers.
* Called with:
* - r3 pointing to the vcpu struct
* - r4 holding the MSR with the current TS bits:
* (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR).
* This can modify all checkpointed registers, but
* restores r1, r2 before exit.
*/
_GLOBAL(__kvmppc_save_tm)
mflr r0
std r0, PPC_LR_STKOFF(r1)
/* Turn on TM. */
mfmsr r8
li r0, 1
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
ori r8, r8, MSR_FP
oris r8, r8, (MSR_VEC | MSR_VSX)@h
mtmsrd r8
rldicl. r4, r4, 64 - MSR_TS_S_LG, 62
beq 1f /* TM not active in guest. */
std r1, HSTATE_SCRATCH2(r13)
std r3, HSTATE_SCRATCH1(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
/* Emulation of the treclaim instruction needs TEXASR before treclaim */
mfspr r6, SPRN_TEXASR
std r6, VCPU_ORIG_TEXASR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
#endif
/* Clear the MSR RI since r1, r13 are all going to be foobar. */
li r5, 0
mtmsrd r5, 1
li r3, TM_CAUSE_KVM_RESCHED
/* All GPRs are volatile at this point. */
TRECLAIM(R3)
/* Temporarily store r13 and r9 so we have some regs to play with */
SET_SCRATCH0(r13)
GET_PACA(r13)
std r9, PACATMSCRATCH(r13)
ld r9, HSTATE_SCRATCH1(r13)
/* Get a few more GPRs free. */
std r29, VCPU_GPRS_TM(29)(r9)
std r30, VCPU_GPRS_TM(30)(r9)
std r31, VCPU_GPRS_TM(31)(r9)
/* Save away PPR and DSCR soon so don't run with user values. */
mfspr r31, SPRN_PPR
HMT_MEDIUM
mfspr r30, SPRN_DSCR
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ld r29, HSTATE_DSCR(r13)
mtspr SPRN_DSCR, r29
#endif
/* Save all but r9, r13 & r29-r31 */
reg = 0
.rept 29
.if (reg != 9) && (reg != 13)
std reg, VCPU_GPRS_TM(reg)(r9)
.endif
reg = reg + 1
.endr
/* ... now save r13 */
GET_SCRATCH0(r4)
std r4, VCPU_GPRS_TM(13)(r9)
/* ... and save r9 */
ld r4, PACATMSCRATCH(r13)
std r4, VCPU_GPRS_TM(9)(r9)
/* Reload stack pointer and TOC. */
ld r1, HSTATE_SCRATCH2(r13)
ld r2, PACATOC(r13)
/* Set MSR RI now we have r1 and r13 back. */
li r5, MSR_RI
mtmsrd r5, 1
/* Save away checkpointed SPRs. */
std r31, VCPU_PPR_TM(r9)
std r30, VCPU_DSCR_TM(r9)
mflr r5
mfcr r6
mfctr r7
mfspr r8, SPRN_AMR
mfspr r10, SPRN_TAR
mfxer r11
std r5, VCPU_LR_TM(r9)
stw r6, VCPU_CR_TM(r9)
std r7, VCPU_CTR_TM(r9)
std r8, VCPU_AMR_TM(r9)
std r10, VCPU_TAR_TM(r9)
std r11, VCPU_XER_TM(r9)
/* Restore r12 as trap number. */
lwz r12, VCPU_TRAP(r9)
/* Save FP/VSX. */
addi r3, r9, VCPU_FPRS_TM
bl store_fp_state
addi r3, r9, VCPU_VRS_TM
bl store_vr_state
mfspr r6, SPRN_VRSAVE
stw r6, VCPU_VRSAVE_TM(r9)
1:
/*
* We need to save these SPRs after the treclaim so that the software
* error code is recorded correctly in the TEXASR. Also the user may
* change these outside of a transaction, so they must always be
* context switched.
*/
mfspr r7, SPRN_TEXASR
std r7, VCPU_TEXASR(r9)
11:
mfspr r5, SPRN_TFHAR
mfspr r6, SPRN_TFIAR
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
/*
* _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it can
* be invoked from C code; it is used by PR KVM only.
*/
_GLOBAL(_kvmppc_save_tm_pr)
mflr r5
std r5, PPC_LR_STKOFF(r1)
stdu r1, -SWITCH_FRAME_SIZE(r1)
SAVE_NVGPRS(r1)
/* save MSR since TM/math bits might be impacted
* by __kvmppc_save_tm().
*/
mfmsr r5
SAVE_GPR(5, r1)
/* also save DSCR/CR/TAR so that they can be restored later */
mfspr r6, SPRN_DSCR
SAVE_GPR(6, r1)
mfcr r7
stw r7, _CCR(r1)
mfspr r8, SPRN_TAR
SAVE_GPR(8, r1)
bl __kvmppc_save_tm
REST_GPR(8, r1)
mtspr SPRN_TAR, r8
ld r7, _CCR(r1)
mtcr r7
REST_GPR(6, r1)
mtspr SPRN_DSCR, r6
/* need to preserve the current MSR's MSR_TS bits */
REST_GPR(5, r1)
mfmsr r6
rldicl r6, r6, 64 - MSR_TS_S_LG, 62
rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
mtmsrd r5
REST_NVGPRS(r1)
addi r1, r1, SWITCH_FRAME_SIZE
ld r5, PPC_LR_STKOFF(r1)
mtlr r5
blr
EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
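The PR-side C wrapper that ends up calling _kvmppc_save_tm_pr() lives in book3s_pr.c, which is not shown in full in this excerpt. A hedged sketch of such a caller, following the calling convention documented above (the real wrapper may differ in detail):

static void sketch_save_tm_pr(struct kvm_vcpu *vcpu)
{
	/* No live transaction: only the TM SPRs need saving. */
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_save_tm_sprs(vcpu);
		return;
	}

	/* r4 of __kvmppc_save_tm is the host MSR for PR KVM. */
	preempt_disable();
	_kvmppc_save_tm_pr(vcpu, mfmsr());
	preempt_enable();
}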
/*
* Restore transactional state and TM-related registers.
* Called with:
* - r3 pointing to the vcpu struct.
* - r4 is the guest MSR with desired TS bits:
* For HV KVM, it is VCPU_MSR
* For PR KVM, it is provided by caller
* This potentially modifies all checkpointed registers.
* It restores r1, r2 from the PACA.
*/
_GLOBAL(__kvmppc_restore_tm)
mflr r0
std r0, PPC_LR_STKOFF(r1)
/* Turn on TM/FP/VSX/VMX so we can restore them. */
mfmsr r5
li r6, MSR_TM >> 32
sldi r6, r6, 32
or r5, r5, r6
ori r5, r5, MSR_FP
oris r5, r5, (MSR_VEC | MSR_VSX)@h
mtmsrd r5
/*
* The user may change these outside of a transaction, so they must
* always be context switched.
*/
ld r5, VCPU_TFHAR(r3)
ld r6, VCPU_TFIAR(r3)
ld r7, VCPU_TEXASR(r3)
mtspr SPRN_TFHAR, r5
mtspr SPRN_TFIAR, r6
mtspr SPRN_TEXASR, r7
mr r5, r4
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
beqlr /* TM not active in guest */
std r1, HSTATE_SCRATCH2(r13)
/* Make sure the failure summary is set; otherwise we'll take a
 * program check when we trechkpt.  It's possible that it was not
 * set by a kvmppc_set_one_reg() call, but we shouldn't let that
 * crash the host.
 */
oris r7, r7, (TEXASR_FS)@h
mtspr SPRN_TEXASR, r7
/*
* We need to load up the checkpointed state for the guest.
* We need to do this early as it will blow away any GPRs, VSRs and
* some SPRs.
*/
mr r31, r3
addi r3, r31, VCPU_FPRS_TM
bl load_fp_state
addi r3, r31, VCPU_VRS_TM
bl load_vr_state
mr r3, r31
lwz r7, VCPU_VRSAVE_TM(r3)
mtspr SPRN_VRSAVE, r7
ld r5, VCPU_LR_TM(r3)
lwz r6, VCPU_CR_TM(r3)
ld r7, VCPU_CTR_TM(r3)
ld r8, VCPU_AMR_TM(r3)
ld r9, VCPU_TAR_TM(r3)
ld r10, VCPU_XER_TM(r3)
mtlr r5
mtcr r6
mtctr r7
mtspr SPRN_AMR, r8
mtspr SPRN_TAR, r9
mtxer r10
/*
* Load up PPR and DSCR values but don't put them in the actual SPRs
* till the last moment to avoid running with userspace PPR and DSCR for
* too long.
*/
ld r29, VCPU_DSCR_TM(r3)
ld r30, VCPU_PPR_TM(r3)
std r2, PACATMSCRATCH(r13) /* Save TOC */
/* Clear the MSR RI since r1, r13 are all going to be foobar. */
li r5, 0
mtmsrd r5, 1
/* Load GPRs r0-r28 */
reg = 0
.rept 29
ld reg, VCPU_GPRS_TM(reg)(r31)
reg = reg + 1
.endr
mtspr SPRN_DSCR, r29
mtspr SPRN_PPR, r30
/* Load final GPRs */
ld 29, VCPU_GPRS_TM(29)(r31)
ld 30, VCPU_GPRS_TM(30)(r31)
ld 31, VCPU_GPRS_TM(31)(r31)
/* TM checkpointed state is now setup. All GPRs are now volatile. */
TRECHKPT
/* Now let's get back the state we need. */
HMT_MEDIUM
GET_PACA(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ld r29, HSTATE_DSCR(r13)
mtspr SPRN_DSCR, r29
#endif
ld r1, HSTATE_SCRATCH2(r13)
ld r2, PACATMSCRATCH(r13)
/* Set the MSR RI since we have our registers back. */
li r5, MSR_RI
mtmsrd r5, 1
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
/*
* _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that it
* can be invoked from C code; it is used by PR KVM only.
*/
_GLOBAL(_kvmppc_restore_tm_pr)
mflr r5
std r5, PPC_LR_STKOFF(r1)
stdu r1, -SWITCH_FRAME_SIZE(r1)
SAVE_NVGPRS(r1)
/* save MSR so the TM/math bits are not clobbered */
mfmsr r5
SAVE_GPR(5, r1)
/* also save DSCR/CR/TAR so that they can be restored later */
mfspr r6, SPRN_DSCR
SAVE_GPR(6, r1)
mfcr r7
stw r7, _CCR(r1)
mfspr r8, SPRN_TAR
SAVE_GPR(8, r1)
bl __kvmppc_restore_tm
REST_GPR(8, r1)
mtspr SPRN_TAR, r8
ld r7, _CCR(r1)
mtcr r7
REST_GPR(6, r1)
mtspr SPRN_DSCR, r6
/* need to preserve the current MSR's MSR_TS bits */
REST_GPR(5, r1)
mfmsr r6
rldicl r6, r6, 64 - MSR_TS_S_LG, 62
rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
mtmsrd r5
REST_NVGPRS(r1)
addi r1, r1, SWITCH_FRAME_SIZE
ld r5, PPC_LR_STKOFF(r1)
mtlr r5
blr
EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */