Commit 2837dbce authored by Michael Ellerman

Merge branch 'topic/ppc-kvm' into next

Merge our KVM topic branch to bring some KVM commits into next for wider
testing.
parents 87b626a6 a3800ef9
@@ -758,7 +758,7 @@ struct kvm_vcpu_arch {
 	u8 prodded;
 	u8 doorbell_request;
 	u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
-	u32 last_inst;
+	unsigned long last_inst;
 	struct rcuwait wait;
 	struct rcuwait *waitp;
@@ -818,7 +818,7 @@ struct kvm_vcpu_arch {
 	u64 busy_stolen;
 	u64 busy_preempt;
-	u32 emul_inst;
+	u64 emul_inst;
 	u32 online;
...
@@ -28,6 +28,7 @@
 #include <asm/xive.h>
 #include <asm/cpu_has_feature.h>
 #endif
+#include <asm/inst.h>

 /*
  * KVMPPC_INST_SW_BREAKPOINT is debug Instruction
@@ -84,7 +85,8 @@ extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
 				int is_default_endian);
 extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-				 enum instruction_fetch_type type, u32 *inst);
+				 enum instruction_fetch_type type,
+				 unsigned long *inst);
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 		     bool data);
@@ -126,25 +128,34 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu,
+					    ulong srr1_flags);
 extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
-extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu,
+				      ulong srr1_flags);
+extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu,
+					ulong srr1_flags);
+extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu,
+					  ulong srr1_flags);
+extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu,
+					  ulong srr1_flags);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				       struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
+extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
+					ulong dear_flags,
 					ulong esr_flags);
 extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
-					   ulong dear_flags,
-					   ulong esr_flags);
+					   ulong srr1_flags,
+					   ulong dar,
+					   ulong dsisr);
 extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
-					   ulong esr_flags);
+					   ulong srr1_flags);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
@@ -315,7 +326,7 @@ extern struct kvmppc_ops *kvmppc_hv_ops;
 extern struct kvmppc_ops *kvmppc_pr_ops;

 static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
-				enum instruction_fetch_type type, u32 *inst)
+				enum instruction_fetch_type type, ppc_inst_t *inst)
 {
 	int ret = EMULATE_DONE;
 	u32 fetched_inst;
@@ -326,15 +337,30 @@ static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
 	ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

 	/* Write fetch_failed unswapped if the fetch failed */
-	if (ret == EMULATE_DONE)
-		fetched_inst = kvmppc_need_byteswap(vcpu) ?
-			swab32(vcpu->arch.last_inst) :
-			vcpu->arch.last_inst;
-	else
-		fetched_inst = vcpu->arch.last_inst;
+	if (ret != EMULATE_DONE) {
+		*inst = ppc_inst(KVM_INST_FETCH_FAILED);
+		return ret;
+	}
+
+#ifdef CONFIG_PPC64
+	/* Is this a prefixed instruction? */
+	if ((vcpu->arch.last_inst >> 32) != 0) {
+		u32 prefix = vcpu->arch.last_inst >> 32;
+		u32 suffix = vcpu->arch.last_inst;
+
+		if (kvmppc_need_byteswap(vcpu)) {
+			prefix = swab32(prefix);
+			suffix = swab32(suffix);
+		}
+		*inst = ppc_inst_prefix(prefix, suffix);
+		return EMULATE_DONE;
+	}
+#endif

-	*inst = fetched_inst;
-	return ret;
+	fetched_inst = kvmppc_need_byteswap(vcpu) ?
+		swab32(vcpu->arch.last_inst) :
+		vcpu->arch.last_inst;
+	*inst = ppc_inst(fetched_inst);
+	return EMULATE_DONE;
 }

 static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
...
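A note on the convention used above: on PPC64, vcpu->arch.last_inst now carries a prefixed instruction with the prefix in the high 32 bits and the suffix in the low 32 bits, so a non-zero high half is what identifies a prefixed image (a prefix always has major opcode 1, so a real prefix can never be zero). A minimal stand-alone sketch of that convention; the helper names are illustrative, not kernel API:

#include <stdint.h>

/* Illustrative helpers only; not part of the kernel API. */
static inline uint64_t pack_prefixed(uint32_t prefix, uint32_t suffix)
{
	/* same layout as vcpu->arch.last_inst for a prefixed instruction */
	return ((uint64_t)prefix << 32) | suffix;
}

static inline int is_prefixed_image(uint64_t last_inst)
{
	/* a plain word instruction leaves the high half zero */
	return (last_inst >> 32) != 0;
}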
@@ -417,6 +417,7 @@
 #define   FSCR_DSCR	__MASK(FSCR_DSCR_LG)
 #define FSCR_INTR_CAUSE	(ASM_CONST(0xFF) << 56)	/* interrupt cause */
 #define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
+#define   HFSCR_PREFIX	__MASK(FSCR_PREFIX_LG)
 #define   HFSCR_MSGP	__MASK(FSCR_MSGP_LG)
 #define   HFSCR_TAR	__MASK(FSCR_TAR_LG)
 #define   HFSCR_EBB	__MASK(FSCR_EBB_LG)
...
@@ -188,10 +188,10 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 }
 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

-void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

@@ -201,29 +201,29 @@ void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL(kvmppc_core_queue_syscall);

-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

-void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, srr1_flags);
 }

-void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, srr1_flags);
 }

-void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, srr1_flags);
 }

 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
@@ -278,18 +278,18 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 }

-void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
-				    ulong flags)
+void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
+				    ulong dar, ulong dsisr)
 {
 	kvmppc_set_dar(vcpu, dar);
-	kvmppc_set_dsisr(vcpu, flags);
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
+	kvmppc_set_dsisr(vcpu, dsisr);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

-void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
@@ -481,20 +481,42 @@ int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
 	return r;
 }

+/*
+ * Returns prefixed instructions with the prefix in the high 32 bits
+ * of *inst and suffix in the low 32 bits.  This is the same convention
+ * as used in HEIR, vcpu->arch.last_inst and vcpu->arch.emul_inst.
+ * Like vcpu->arch.last_inst but unlike vcpu->arch.emul_inst, each
+ * half of the value needs byte-swapping if the guest endianness is
+ * different from the host endianness.
+ */
 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-		enum instruction_fetch_type type, u32 *inst)
+		enum instruction_fetch_type type, unsigned long *inst)
 {
 	ulong pc = kvmppc_get_pc(vcpu);
 	int r;
+	u32 iw;

 	if (type == INST_SC)
 		pc -= 4;

-	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
-	if (r == EMULATE_DONE)
-		return r;
-	else
+	r = kvmppc_ld(vcpu, &pc, sizeof(u32), &iw, false);
+	if (r != EMULATE_DONE)
 		return EMULATE_AGAIN;
+	/*
+	 * If [H]SRR1 indicates that the instruction that caused the
+	 * current interrupt is a prefixed instruction, get the suffix.
+	 */
+	if (kvmppc_get_msr(vcpu) & SRR1_PREFIXED) {
+		u32 suffix;
+		pc += 4;
+		r = kvmppc_ld(vcpu, &pc, sizeof(u32), &suffix, false);
+		if (r != EMULATE_DONE)
+			return EMULATE_AGAIN;
+		*inst = ((u64)iw << 32) | suffix;
+	} else {
+		*inst = iw;
+	}
+	return r;
 }
 EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
...
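The comment block above pins down the cross-endian rule: each 32-bit half of the prefix/suffix image is byte-swapped independently, never the 64-bit value as a whole. A stand-alone sketch under that assumption; swab32_u and byteswap_halves are illustrative names, not kernel functions:

#include <stdint.h>

static inline uint32_t swab32_u(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

/* Swap each half on its own, per the convention described above. */
static uint64_t byteswap_halves(uint64_t image)
{
	uint32_t prefix = swab32_u((uint32_t)(image >> 32));
	uint32_t suffix = swab32_u((uint32_t)image);

	return ((uint64_t)prefix << 32) | suffix;
}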
@@ -415,20 +415,25 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
-static int instruction_is_store(unsigned int instr)
+static int instruction_is_store(ppc_inst_t instr)
 {
 	unsigned int mask;
+	unsigned int suffix;

 	mask = 0x10000000;
-	if ((instr & 0xfc000000) == 0x7c000000)
+	suffix = ppc_inst_val(instr);
+	if (ppc_inst_prefixed(instr))
+		suffix = ppc_inst_suffix(instr);
+	else if ((suffix & 0xfc000000) == 0x7c000000)
 		mask = 0x100;		/* major opcode 31 */
-	return (instr & mask) != 0;
+	return (suffix & mask) != 0;
 }
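For reference, the two masks in instruction_is_store() encode a real property of the ISA: for D-form major opcodes, bit 0x10000000 separates stores (stw is opcode 36) from loads (lwz is opcode 32), and within major opcode 31 the extended-opcode bit 0x100 does the same job. A small self-contained check of that heuristic on word instructions; word_is_store mirrors only the unprefixed path above, and the constants in main are ordinary stw/lwz/stwx/lwzx encodings:

#include <assert.h>
#include <stdint.h>

static int word_is_store(uint32_t word)
{
	uint32_t mask = 0x10000000;

	if ((word & 0xfc000000) == 0x7c000000)
		mask = 0x100;	/* major opcode 31: look at the XO field */
	return (word & mask) != 0;
}

int main(void)
{
	assert(word_is_store(0x90640000));	/* stw  r3,0(r4)  */
	assert(!word_is_store(0x80640000));	/* lwz  r3,0(r4)  */
	assert(word_is_store(0x7c64292e));	/* stwx r3,r4,r5  */
	assert(!word_is_store(0x7c64282e));	/* lwzx r3,r4,r5  */
	return 0;
}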
 int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 			   unsigned long gpa, gva_t ea, int is_store)
 {
-	u32 last_inst;
+	ppc_inst_t last_inst;
+	bool is_prefixed = !!(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);

 	/*
 	 * Fast path - check if the guest physical address corresponds to a
@@ -443,7 +448,7 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 				       NULL);
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		if (!ret) {
-			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + (is_prefixed ? 8 : 4));
 			return RESUME_GUEST;
 		}
 	}
@@ -458,7 +463,16 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 	/*
 	 * WARNING: We do not know for sure whether the instruction we just
 	 * read from memory is the same that caused the fault in the first
-	 * place.  If the instruction we read is neither an load or a store,
+	 * place.
+	 *
+	 * If the fault is prefixed but the instruction is not or vice
+	 * versa, try again so that we don't advance pc the wrong amount.
+	 */
+	if (ppc_inst_prefixed(last_inst) != is_prefixed)
+		return RESUME_GUEST;
+
+	/*
+	 * If the instruction we read is neither an load or a store,
 	 * then it can't access memory, so we don't need to worry about
 	 * enforcing access permissions.  So, assuming it is a load or
 	 * store, we just check that its direction (load or store) is
...
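The retry above exists because the guest can rewrite the instruction between the fault and our fetch: if SRR1 recorded the fault as prefixed (or not) but the image we read back disagrees, advancing pc by the wrong amount would corrupt guest control flow. A compact sketch of the rule, with illustrative names rather than kernel code:

#include <stdbool.h>

/*
 * Advance by 8 only for a prefixed instruction, and only when the
 * SRR1_PREFIXED bit captured at interrupt time agrees with what was
 * actually fetched from guest memory; otherwise re-enter the guest
 * and let it fault again.
 */
static long mmio_pc_step(bool inst_prefixed, bool srr1_prefixed, bool *retry)
{
	*retry = (inst_prefixed != srr1_prefixed);	/* raced with the guest */
	return inst_prefixed ? 8 : 4;
}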
@@ -954,7 +954,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	if (dsisr & DSISR_BADACCESS) {
 		/* Reflect to the guest as DSI */
 		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
-		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+		kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+				ea, dsisr);
 		return RESUME_GUEST;
 	}
@@ -979,7 +981,9 @@
 		 * Bad address in guest page table tree, or other
 		 * unusual error - reflect it to the guest as DSI.
 		 */
-		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+		kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+				ea, dsisr);
 		return RESUME_GUEST;
 	}
 	return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
@@ -988,8 +992,9 @@
 	if (memslot->flags & KVM_MEM_READONLY) {
 		if (writing) {
 			/* give the guest a DSI */
-			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
-						       DSISR_PROTFAULT);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
 			return RESUME_GUEST;
 		}
 		kvm_ro = true;
...
@@ -474,7 +474,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 	for (r = 0; r < vcpu->arch.slb_max; ++r)
 		pr_err("  ESID = %.16llx VSID = %.16llx\n",
 		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
-	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
+	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.16lx\n",
 	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
 	       vcpu->arch.last_inst);
 }
@@ -1412,7 +1412,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
 static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
 {
-	u32 last_inst;
+	ppc_inst_t last_inst;

 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
 			EMULATE_DONE) {
@@ -1423,12 +1423,13 @@ static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
 		return RESUME_GUEST;
 	}

-	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
+	if (ppc_inst_val(last_inst) == KVMPPC_INST_SW_BREAKPOINT) {
 		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
 		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
 		return RESUME_HOST;
 	} else {
-		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+		kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 		return RESUME_GUEST;
 	}
 }
@@ -1476,9 +1477,11 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
 	unsigned long arg;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tvcpu;
+	ppc_inst_t pinst;

-	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
+	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst) != EMULATE_DONE)
 		return RESUME_GUEST;
+	inst = ppc_inst_val(pinst);
 	if (get_op(inst) != 31)
 		return EMULATE_FAIL;
 	rb = get_rb(inst);
@@ -1630,7 +1633,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		 * so that it knows that the machine check occurred.
 		 */
 		if (!vcpu->kvm->arch.fwnmi_enabled) {
-			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
+			ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 			kvmppc_core_queue_machine_check(vcpu, flags);
 			r = RESUME_GUEST;
 			break;
@@ -1659,7 +1663,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		 * as a result of a hypervisor emulation interrupt
 		 * (e40) getting turned into a 700 by BML RTAS.
 		 */
-		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
+		flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+			(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		kvmppc_core_queue_program(vcpu, flags);
 		r = RESUME_GUEST;
 		break;
@@ -1740,6 +1745,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
 			kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
 			r = RESUME_GUEST;
 			break;
@@ -1758,6 +1764,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			r = RESUME_PAGE_FAULT;
 		} else {
 			kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
 				vcpu->arch.fault_dar, err);
 			r = RESUME_GUEST;
 		}
@@ -1785,7 +1792,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
 			kvmppc_core_queue_inst_storage(vcpu,
-				vcpu->arch.fault_dsisr);
+				vcpu->arch.fault_dsisr |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 			break;
 		}
@@ -1802,7 +1810,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		} else if (err == -1) {
 			r = RESUME_PAGE_FAULT;
 		} else {
-			kvmppc_core_queue_inst_storage(vcpu, err);
+			kvmppc_core_queue_inst_storage(vcpu,
+				err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1823,7 +1832,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
 			r = kvmppc_emulate_debug_inst(vcpu);
 		} else {
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1864,7 +1874,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			r = kvmppc_tm_unavailable(vcpu);
 		}
 		if (r == EMULATE_FAIL) {
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1994,14 +2005,15 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 	 */
 	if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
 	    (vcpu->arch.nested_hfscr & (1UL << cause))) {
+		ppc_inst_t pinst;
 		vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;

 		/*
 		 * If the fetch failed, return to guest and
 		 * try executing it again.
 		 */
-		r = kvmppc_get_last_inst(vcpu, INST_GENERIC,
-					 &vcpu->arch.emul_inst);
+		r = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+		vcpu->arch.emul_inst = ppc_inst_val(pinst);
 		if (r != EMULATE_DONE)
 			r = RESUME_GUEST;
 		else
@@ -2918,13 +2930,18 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 	/*
 	 * Set the default HFSCR for the guest from the host value.
-	 * This value is only used on POWER9.
-	 * On POWER9, we want to virtualize the doorbell facility, so we
+	 * This value is only used on POWER9 and later.
+	 * On >= POWER9, we want to virtualize the doorbell facility, so we
 	 * don't set the HFSCR_MSGP bit, and that causes those instructions
 	 * to trap and then we emulate them.
 	 */
 	vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
 		HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
+
+	/* On POWER10 and later, allow prefixed instructions */
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		vcpu->arch.hfscr |= HFSCR_PREFIX;
+
 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
 		vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
...
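One way to read the HFSCR hunk above: bits left clear in vcpu->arch.hfscr make the corresponding facility trap to the hypervisor (that is how msgsnd gets emulated), and on a facility-unavailable interrupt the top byte of HFSCR carries the cause, i.e. the bit number of the facility that trapped, matching the FSCR_INTR_CAUSE mask seen earlier. A hedged sketch of the permission check; facility_permitted is an illustrative helper, not the kernel's exact code:

#include <stdint.h>

static int facility_permitted(uint64_t hfscr, unsigned int cause)
{
	/* cause is the bit number reported in HFSCR[56:63] on an HFU trap */
	return (hfscr & (1ULL << cause)) != 0;
}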
@@ -1560,7 +1560,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
 		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
 			/* unusual error -> reflect to the guest as a DSI */
-			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, dsisr);
 			return RESUME_GUEST;
 		}
@@ -1570,8 +1572,9 @@
 	if (memslot->flags & KVM_MEM_READONLY) {
 		if (writing) {
 			/* Give the guest a DSI */
-			kvmppc_core_queue_data_storage(vcpu, ea,
-					DSISR_ISSTORE | DSISR_PROTFAULT);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
 			return RESUME_GUEST;
 		}
 		kvm_ro = true;
...
@@ -502,8 +502,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 *                                                                           *
 *****************************************************************************/

-.global kvmppc_hv_entry
-kvmppc_hv_entry:
+SYM_CODE_START_LOCAL(kvmppc_hv_entry)

 	/* Required state:
 	 *
@@ -940,6 +939,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	ld	r4, VCPU_GPR(R4)(r4)
 	HRFI_TO_GUEST
 	b	.
+SYM_CODE_END(kvmppc_hv_entry)

 secondary_too_late:
 	li	r12, 0
@@ -1071,11 +1071,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	/* Save HEIR (HV emulation assist reg) in emul_inst
 	   if this is an HEI (HV emulation interrupt, e40) */
 	li	r3,KVM_INST_FETCH_FAILED
-	stw	r3,VCPU_LAST_INST(r9)
+	std	r3,VCPU_LAST_INST(r9)
 	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
 	bne	11f
 	mfspr	r3,SPRN_HEIR
-11:	stw	r3,VCPU_HEIR(r9)
+11:	std	r3,VCPU_HEIR(r9)

 	/* these are volatile across C function calls */
 	mfctr	r3
@@ -1676,7 +1676,7 @@ fast_interrupt_c_return:
 	mtmsrd	r3

 	/* Store the result */
-	stw	r8, VCPU_LAST_INST(r9)
+	std	r8, VCPU_LAST_INST(r9)

 	/* Unset guest mode. */
 	li	r0, KVM_GUEST_MODE_HOST_HV
...
@@ -621,6 +621,7 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
 int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu)
 {
 	u32 inst;
+	ppc_inst_t pinst;
 	enum emulation_result emulated = EMULATE_DONE;
 	int ax_rd, ax_ra, ax_rb, ax_rc;
 	short full_d;
@@ -632,7 +633,8 @@ int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu)
 	int i;
 #endif

-	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
+	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+	inst = ppc_inst_val(pinst);
 	if (emulated != EMULATE_DONE)
 		return emulated;
...
@@ -759,7 +759,7 @@ static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
 		flags = DSISR_NOHPTE;
 		if (data) {
 			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
-			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
+			kvmppc_core_queue_data_storage(vcpu, 0, eaddr, flags);
 		} else {
 			kvmppc_core_queue_inst_storage(vcpu, flags);
 		}
@@ -1044,6 +1044,8 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
 {
 	if (fscr & FSCR_SCV)
 		fscr &= ~FSCR_SCV; /* SCV must not be enabled */
+	/* Prohibit prefixed instructions for now */
+	fscr &= ~FSCR_PREFIX;
 	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
 		/* TAR got dropped, drop it in shadow too */
 		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
@@ -1079,7 +1081,7 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
 	enum emulation_result er;
 	ulong flags;
-	u32 last_inst;
+	ppc_inst_t last_inst;
 	int emul, r;

 	/*
@@ -1100,9 +1102,9 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	if (kvmppc_get_msr(vcpu) & MSR_PR) {
 #ifdef EXIT_DEBUG
 		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
-			kvmppc_get_pc(vcpu), last_inst);
+			kvmppc_get_pc(vcpu), ppc_inst_val(last_inst));
 #endif
-		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
+		if ((ppc_inst_val(last_inst) & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
 			kvmppc_core_queue_program(vcpu, flags);
 			return RESUME_GUEST;
 		}
@@ -1119,7 +1121,7 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 		break;
 	case EMULATE_FAIL:
 		pr_crit("%s: emulation at %lx failed (%08x)\n",
-			__func__, kvmppc_get_pc(vcpu), last_inst);
+			__func__, kvmppc_get_pc(vcpu), ppc_inst_val(last_inst));
 		kvmppc_core_queue_program(vcpu, flags);
 		r = RESUME_GUEST;
 		break;
@@ -1236,7 +1238,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 			r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
 			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		} else {
-			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
+			kvmppc_core_queue_data_storage(vcpu, 0, dar, fault_dsisr);
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1281,7 +1283,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 		break;
 	case BOOK3S_INTERRUPT_SYSCALL:
 	{
-		u32 last_sc;
+		ppc_inst_t last_sc;
 		int emul;

 		/* Get last sc for papr */
@@ -1296,7 +1298,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 		}

 		if (vcpu->arch.papr_enabled &&
-		    (last_sc == 0x44000022) &&
+		    (ppc_inst_val(last_sc) == 0x44000022) &&
 		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 			/* SC 1 papr hypercalls */
 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
@@ -1348,7 +1350,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	{
 		int ext_msr = 0;
 		int emul;
-		u32 last_inst;
+		ppc_inst_t last_inst;

 		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
 			/* Do paired single instruction emulation */
@@ -1382,15 +1384,15 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	}
 	case BOOK3S_INTERRUPT_ALIGNMENT:
 	{
-		u32 last_inst;
+		ppc_inst_t last_inst;
 		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

 		if (emul == EMULATE_DONE) {
 			u32 dsisr;
 			u64 dar;

-			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
-			dar = kvmppc_alignment_dar(vcpu, last_inst);
+			dsisr = kvmppc_alignment_dsisr(vcpu, ppc_inst_val(last_inst));
+			dar = kvmppc_alignment_dar(vcpu, ppc_inst_val(last_inst));

 			kvmppc_set_dsisr(vcpu, dsisr);
 			kvmppc_set_dar(vcpu, dar);
...
@@ -123,6 +123,7 @@ INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALTIVEC

 kvmppc_handler_skip_ins:
 	/* Patch the IP to the next instruction */
+	/* Note that prefixed instructions are disabled in PR KVM for now */
 	mfsrr0	r12
 	addi	r12, r12, 4
 	mtsrr0	r12
...
@@ -283,9 +283,10 @@ void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
 }

-void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
 				    ulong dear_flags, ulong esr_flags)
 {
+	WARN_ON_ONCE(srr1_flags);
 	vcpu->arch.queued_dear = dear_flags;
 	vcpu->arch.queued_esr = esr_flags;
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
@@ -316,14 +317,16 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
 }

-void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
+	WARN_ON_ONCE(srr1_flags);
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
 }

 #ifdef CONFIG_ALTIVEC
-void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
+	WARN_ON_ONCE(srr1_flags);
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
 }
 #endif
@@ -623,7 +626,7 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu)
 	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
 }

-void kvmppc_watchdog_func(struct timer_list *t)
+static void kvmppc_watchdog_func(struct timer_list *t)
 {
 	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
 	u32 tsr, new_tsr;
@@ -841,7 +844,7 @@ static int emulation_exit(struct kvm_vcpu *vcpu)
 		return RESUME_GUEST;

 	case EMULATE_FAIL:
-		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+		printk(KERN_CRIT "%s: emulation at %lx failed (%08lx)\n",
 		       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
 		/* For debugging, encode the failing instruction and
 		 * report it to userspace. */
@@ -1000,7 +1003,7 @@ static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
 	}
 }

-/**
+/*
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
@@ -1012,6 +1015,7 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	int s;
 	int idx;
 	u32 last_inst = KVM_INST_FETCH_FAILED;
+	ppc_inst_t pinst;
 	enum emulation_result emulated = EMULATE_DONE;

 	/* Fix irq state (pairs with kvmppc_fix_ee_before_entry()) */
@@ -1031,12 +1035,15 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	case BOOKE_INTERRUPT_DATA_STORAGE:
 	case BOOKE_INTERRUPT_DTLB_MISS:
 	case BOOKE_INTERRUPT_HV_PRIV:
-		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
+		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+		last_inst = ppc_inst_val(pinst);
 		break;
 	case BOOKE_INTERRUPT_PROGRAM:
 		/* SW breakpoints arrive as illegal instructions on HV */
-		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
-			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
+			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+			last_inst = ppc_inst_val(pinst);
+		}
 		break;
 	default:
 		break;
@@ -1225,7 +1232,7 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 #endif

 	case BOOKE_INTERRUPT_DATA_STORAGE:
-		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
+		kvmppc_core_queue_data_storage(vcpu, 0, vcpu->arch.fault_dear,
 					       vcpu->arch.fault_esr);
 		kvmppc_account_exit(vcpu, DSI_EXITS);
 		r = RESUME_GUEST;
@@ -1946,7 +1953,8 @@ static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
 	dbg_reg->dbcr0 |= DBCR0_IDM;
 	return 0;
 }

-void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
+static void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap,
+				  bool set)
 {
 	/* XXX: Add similar MSR protection for BookE-PR */
 #ifdef CONFIG_KVM_BOOKE_HV
...
@@ -109,4 +109,7 @@ static inline void kvmppc_clear_dbsr(void)
 {
 	mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
 }

+int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr);
+
 #endif /* __KVM_BOOKE_H__ */
@@ -139,7 +139,7 @@ END_BTB_FLUSH_SECTION
 	 * kvmppc_get_last_inst().
 	 */
 	li	r9, KVM_INST_FETCH_FAILED
-	stw	r9, VCPU_LAST_INST(r4)
+	PPC_STL	r9, VCPU_LAST_INST(r4)
 	.endif

 	.if	\flags & NEED_ESR
...
@@ -623,7 +623,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 #ifdef CONFIG_KVM_BOOKE_HV
 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-		enum instruction_fetch_type type, u32 *instr)
+		enum instruction_fetch_type type, unsigned long *instr)
 {
 	gva_t geaddr;
 	hpa_t addr;
@@ -713,7 +713,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
 }
 #else
 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-		enum instruction_fetch_type type, u32 *instr)
+		enum instruction_fetch_type type, unsigned long *instr)
 {
 	return EMULATE_AGAIN;
 }
...
@@ -168,7 +168,7 @@ static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
 	kvmppc_booke_vcpu_put(vcpu);
 }

-int kvmppc_e500mc_check_processor_compat(void)
+static int kvmppc_e500mc_check_processor_compat(void)
 {
 	int r;
...
@@ -194,6 +194,7 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
 {
 	u32 inst;
+	ppc_inst_t pinst;
 	int rs, rt, sprn;
 	enum emulation_result emulated;
 	int advance = 1;
@@ -201,7 +202,8 @@ int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
 	/* this default type might be overwritten by subcategories */
 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

-	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
+	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+	inst = ppc_inst_val(pinst);
 	if (emulated != EMULATE_DONE)
 		return emulated;
@@ -299,6 +301,10 @@ int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
 	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

 	/* Advance past emulated instruction. */
+	/*
+	 * If this ever handles prefixed instructions, the 4
+	 * will need to become ppc_inst_len(pinst) instead.
+	 */
 	if (advance)
 		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
...
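The new comment above points at the rule used elsewhere in this series: ppc_inst_len() is 8 bytes for a prefixed instruction and 4 for a word instruction. A stand-alone equivalent under the same high-half convention as vcpu->arch.last_inst; inst_len is an illustrative name, not the kernel helper:

#include <stdint.h>

static int inst_len(uint64_t image)
{
	/* prefixed images carry a non-zero prefix in the high 32 bits */
	return (image >> 32) ? 8 : 4;
}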
@@ -28,7 +28,7 @@
 static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
-		kvmppc_core_queue_fpunavail(vcpu);
+		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
@@ -40,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
-		kvmppc_core_queue_vsx_unavail(vcpu);
+		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
@@ -52,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
-		kvmppc_core_queue_vec_unavail(vcpu);
+		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
@@ -71,7 +71,7 @@ static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
 */
 int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 {
-	u32 inst;
+	ppc_inst_t inst;
 	enum emulation_result emulated = EMULATE_FAIL;
 	struct instruction_op op;
@@ -93,7 +93,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 	emulated = EMULATE_FAIL;
 	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
-	if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
+	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
 		int type = op.type & INSTR_TYPE_MASK;
 		int size = GETSIZE(op.type);
@@ -356,11 +356,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 	}
 	}

-	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
+	trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);

 	/* Advance past emulated instruction. */
 	if (emulated != EMULATE_FAIL)
-		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));

 	return emulated;
 }
@@ -304,11 +304,11 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
 		break;
 	case EMULATE_FAIL:
 	{
-		u32 last_inst;
+		ppc_inst_t last_inst;

 		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
 		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
-				      last_inst);
+				      ppc_inst_val(last_inst));

 		/*
 		 * Injecting a Data Storage here is a bit more
@@ -321,7 +321,9 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
 		if (vcpu->mmio_is_write)
 			dsisr |= DSISR_ISSTORE;

-		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
+		kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+				vcpu->arch.vaddr_accessed, dsisr);
 	} else {
 		/*
 		 * BookE does not send a SIGBUS on a bad
...