Commit 6f63e81b authored by Bin Lu, committed by Paul Mackerras

KVM: PPC: Book3S: Add MMIO emulation for FP and VSX instructions

This patch provides MMIO load/store emulation for FP and VSX instructions,
covering the double type and the vector unsigned/signed char, vector
unsigned/signed short, vector unsigned/signed int and vector double types.

The instructions that this adds emulation for are:

- ldx, ldux, lwax,
- lfs, lfsx, lfsu, lfsux, lfd, lfdx, lfdu, lfdux,
- stfs, stfsx, stfsu, stfsux, stfd, stfdx, stfdu, stfdux, stfiwx,
- lxsdx, lxsspx, lxsiwax, lxsiwzx, lxvd2x, lxvw4x, lxvdsx,
- stxsdx, stxsspx, stxsiwx, stxvd2x, stxvw4x

[paulus@ozlabs.org - some cleanups, fixes and rework, make it
 compile for Book E, fix build when PR KVM is built in]
Signed-off-by: Bin Lu <lblulb@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 307d9279
@@ -87,6 +87,11 @@ static inline unsigned int get_oc(u32 inst)
return (inst >> 11) & 0x7fff;
}
static inline unsigned int get_tx_or_sx(u32 inst)
{
return (inst) & 0x1;
}
#define IS_XFORM(inst) (get_op(inst) == 31)
#define IS_DSFORM(inst) (get_op(inst) >= 56)
...
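The new get_tx_or_sx() accessor returns bit 0 of the instruction; VSX encodings use this TX/SX bit to extend the 5-bit register field so that all 64 VSX registers are addressable. A minimal sketch of how a decoder could combine the two fields, assuming the existing get_rt() accessor from this header; this helper is illustrative and not part of the patch:

/* Hypothetical helper, not in this patch: full VSX register number
 * for an X-form instruction (TX/SX selects the upper half of the file). */
static inline unsigned int get_vsx_reg(u32 inst)
{
	return get_rt(inst) | (get_tx_or_sx(inst) << 5);
}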
@@ -438,6 +438,11 @@ struct mmio_hpte_cache {
unsigned int index;
};
#define KVMPPC_VSX_COPY_NONE 0
#define KVMPPC_VSX_COPY_WORD 1
#define KVMPPC_VSX_COPY_DWORD 2
#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3
struct openpic;
struct kvm_vcpu_arch {
@@ -641,6 +646,21 @@ struct kvm_vcpu_arch {
u8 io_gpr; /* GPR used as IO source/target */
u8 mmio_host_swabbed;
u8 mmio_sign_extend;
/* conversion between single and double precision */
u8 mmio_sp64_extend;
/*
 * Number of emulated copy operations needed for a VSX access.
 * If we use 2 * 8-byte copies to emulate one 16-byte access,
 * the number is 2 and
 * mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD.
 * If we use 4 * 4-byte copies to emulate one 16-byte access,
 * the number is 4 and
 * mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD.
 */
u8 mmio_vsx_copy_nums;
u8 mmio_vsx_offset;
u8 mmio_vsx_copy_type;
u8 mmio_vsx_tx_sx_enabled;
u8 osi_needed;
u8 osi_enabled;
u8 papr_enabled;
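Taken together, these fields describe how one VSX access is emulated as a sequence of GPR-sized MMIO copies. A hedged sketch of how an instruction decoder would be expected to program them, following the comment above; the per-opcode assignments (including the lxvdsx splat case) are assumptions here, since the actual decoder changes are in the collapsed emulate_loadstore.c diff:

/* Illustrative sketch only, assuming get_xop() from disassemble.h. */
static void demo_setup_vsx_copy(struct kvm_vcpu *vcpu, u32 inst)
{
	switch (get_xop(inst)) {
	case OP_31_XOP_LXVD2X:	/* 16 bytes as 2 * 8-byte copies */
		vcpu->arch.mmio_vsx_copy_nums = 2;
		vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
		break;
	case OP_31_XOP_LXVW4X:	/* 16 bytes as 4 * 4-byte copies */
		vcpu->arch.mmio_vsx_copy_nums = 4;
		vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
		break;
	case OP_31_XOP_LXVDSX:	/* one 8-byte copy, splatted to both dwords */
		vcpu->arch.mmio_vsx_copy_nums = 1;
		vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
		break;
	}
}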
@@ -729,6 +749,8 @@ struct kvm_vcpu_arch {
};
#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
#define VCPU_VSX_FPR(vcpu, i, j) ((vcpu)->arch.fp.fpr[i][j])
#define VCPU_VSX_VR(vcpu, i) ((vcpu)->arch.vr.vr[i])
/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY 0
@@ -742,6 +764,7 @@ struct kvm_vcpu_arch {
#define KVM_MMIO_REG_FPR 0x0020
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060
#define KVM_MMIO_REG_VSX 0x0080
#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE
...
@@ -78,9 +78,15 @@ extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
int rs, unsigned int bytes,
int is_default_endian);
extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
enum instruction_type type, u32 *inst);
@@ -243,6 +249,7 @@ union kvmppc_one_reg {
u64 dval;
vector128 vval;
u64 vsxval[2];
u32 vsx32val[4];
struct {
u64 addr;
u64 length;
...
@@ -86,7 +86,9 @@
#define OP_TRAP_64 2
#define OP_31_XOP_TRAP 4
#define OP_31_XOP_LDX 21
#define OP_31_XOP_LWZX 23
#define OP_31_XOP_LDUX 53
#define OP_31_XOP_DCBST 54
#define OP_31_XOP_LWZUX 55
#define OP_31_XOP_TRAP_64 68
@@ -99,6 +101,7 @@
#define OP_31_XOP_LHZX 279
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MFSPR 339
#define OP_31_XOP_LWAX 341
#define OP_31_XOP_LHAX 343
#define OP_31_XOP_LHAUX 375
#define OP_31_XOP_STHX 407
@@ -108,10 +111,46 @@
#define OP_31_XOP_LWBRX 534
#define OP_31_XOP_TLBSYNC 566
#define OP_31_XOP_STWBRX 662
#define OP_31_XOP_STFSX 663
#define OP_31_XOP_STFSUX 695
#define OP_31_XOP_STFDX 727
#define OP_31_XOP_STFDUX 759
#define OP_31_XOP_LHBRX 790
#define OP_31_XOP_STHBRX 918
#define OP_31_XOP_STFIWX 983
/* VSX Scalar Load Instructions */
#define OP_31_XOP_LXSDX 588
#define OP_31_XOP_LXSSPX 524
#define OP_31_XOP_LXSIWAX 76
#define OP_31_XOP_LXSIWZX 12
/* VSX Scalar Store Instructions */
#define OP_31_XOP_STXSDX 716
#define OP_31_XOP_STXSSPX 652
#define OP_31_XOP_STXSIWX 140
/* VSX Vector Load Instructions */
#define OP_31_XOP_LXVD2X 844
#define OP_31_XOP_LXVW4X 780
/* VSX Vector Load and Splat Instruction */
#define OP_31_XOP_LXVDSX 332
/* VSX Vector Store Instructions */
#define OP_31_XOP_STXVD2X 972
#define OP_31_XOP_STXVW4X 908
#define OP_31_XOP_LFSX 535
#define OP_31_XOP_LFSUX 567
#define OP_31_XOP_LFDX 599
#define OP_31_XOP_LFDUX 631
#define OP_LWZ 32
#define OP_STFS 52
#define OP_STFSU 53
#define OP_STFD 54
#define OP_STFDU 55
#define OP_LD 58
#define OP_LWZU 33
#define OP_LBZ 34
@@ -127,6 +166,17 @@
#define OP_LHAU 43
#define OP_STH 44
#define OP_STHU 45
#define OP_LMW 46
#define OP_STMW 47
#define OP_LFS 48
#define OP_LFSU 49
#define OP_LFD 50
#define OP_LFDU 51
#define OP_STFS 52
#define OP_STFSU 53
#define OP_STFD 54
#define OP_STFDU 55
#define OP_LQ 56
/* sorted alphabetically */
#define PPC_INST_BHRBE 0x7c00025c
...
This diff is collapsed.
@@ -37,6 +37,7 @@
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"
@@ -801,6 +802,129 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
int offset;
if ((index != 0) && (index != 1))
return -1;
#ifdef __BIG_ENDIAN
offset = index;
#else
offset = 1 - index;
#endif
return offset;
}
static inline int kvmppc_get_vsr_word_offset(int index)
{
int offset;
if ((index > 3) || (index < 0))
return -1;
#ifdef __BIG_ENDIAN
offset = index;
#else
offset = 3 - index;
#endif
return offset;
}
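These helpers translate a guest-view element index into the array index of the host's in-memory layout of the 128-bit register. For example, on a little-endian host the guest's dword 0 lives at vsxval[1]. A standalone illustration of the mapping (plain C, with endianness passed as a flag rather than compiled in, purely for demonstration):

#include <stdio.h>

/* Mirrors kvmppc_get_vsr_dword_offset() for illustration only. */
static int vsr_dword_offset(int index, int host_is_be)
{
	if (index != 0 && index != 1)
		return -1;
	return host_is_be ? index : 1 - index;
}

int main(void)
{
	printf("LE host: dword 0 -> vsxval[%d]\n", vsr_dword_offset(0, 0));
	printf("BE host: dword 0 -> vsxval[%d]\n", vsr_dword_offset(0, 1));
	return 0;
}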
static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
u64 gpr)
{
union kvmppc_one_reg val;
int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
if (offset == -1)
return;
if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
val.vval = VCPU_VSX_VR(vcpu, index);
val.vsxval[offset] = gpr;
VCPU_VSX_VR(vcpu, index) = val.vval;
} else {
VCPU_VSX_FPR(vcpu, index, offset) = gpr;
}
}
static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
u64 gpr)
{
union kvmppc_one_reg val;
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
val.vval = VCPU_VSX_VR(vcpu, index);
val.vsxval[0] = gpr;
val.vsxval[1] = gpr;
VCPU_VSX_VR(vcpu, index) = val.vval;
} else {
VCPU_VSX_FPR(vcpu, index, 0) = gpr;
VCPU_VSX_FPR(vcpu, index, 1) = gpr;
}
}
static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
u32 gpr32)
{
union kvmppc_one_reg val;
int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
int dword_offset, word_offset;
if (offset == -1)
return;
if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
val.vval = VCPU_VSX_VR(vcpu, index);
val.vsx32val[offset] = gpr32;
VCPU_VSX_VR(vcpu, index) = val.vval;
} else {
dword_offset = offset / 2;
word_offset = offset % 2;
val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
val.vsx32val[word_offset] = gpr32;
VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
}
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
u64 fprd;
preempt_disable();
enable_kernel_fp();
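/* fr0 is scratch: lfs converts the single to double precision on load,
 * and stfd stores the widened image to fprd. */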
asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
: "fr0");
preempt_enable();
return fprd;
}
static inline u32 dp_to_sp(u64 fprd)
{
u32 fprs;
preempt_disable();
enable_kernel_fp();
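/* lfd loads the double; stfs rounds it to single precision on store. */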
asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
: "fr0");
preempt_enable();
return fprs;
}
#else
#define sp_to_dp(x) (x)
#define dp_to_sp(x) (x)
#endif /* CONFIG_PPC_FPU */
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
@@ -827,6 +951,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
/* conversion between single and double precision */
if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
gpr = sp_to_dp(gpr);
if (vcpu->arch.mmio_sign_extend) {
switch (run->mmio.len) {
#ifdef CONFIG_PPC64
@@ -843,8 +971,6 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
case KVM_MMIO_REG_GPR:
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
@@ -860,6 +986,17 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#endif
#ifdef CONFIG_VSX
case KVM_MMIO_REG_VSX:
if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
kvmppc_set_vsr_dword(vcpu, gpr);
else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
kvmppc_set_vsr_word(vcpu, gpr);
else if (vcpu->arch.mmio_vsx_copy_type ==
KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
kvmppc_set_vsr_dword_dump(vcpu, gpr);
break;
#endif
default:
BUG();
@@ -927,6 +1064,35 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}
#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend)
{
enum emulation_result emulated = EMULATE_DONE;
/* Currently, mmio_vsx_copy_nums is limited to at most 4 copies */
if ((vcpu->arch.mmio_vsx_copy_nums > 4) ||
    (vcpu->arch.mmio_vsx_copy_nums < 0)) {
return EMULATE_FAIL;
}
while (vcpu->arch.mmio_vsx_copy_nums) {
emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
is_default_endian, mmio_sign_extend);
if (emulated != EMULATE_DONE)
break;
vcpu->arch.paddr_accessed += run->mmio.len;
vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++;
}
return emulated;
}
#endif /* CONFIG_VSX */
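For context, a hedged sketch of a caller; the real call sites live in the collapsed emulate_loadstore.c diff, so the exact arguments here are assumptions. Tagging the register number with KVM_MMIO_REG_VSX is what routes the completed load through kvmppc_set_vsr_dword() above:

/* Illustrative only: emulate lxvd2x as two 8-byte MMIO loads. */
static int demo_emulate_lxvd2x(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt)
{
	vcpu->arch.mmio_vsx_copy_nums = 2;
	vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
	return kvmppc_handle_vsx_load(run, vcpu, KVM_MMIO_REG_VSX | rt,
				      8, 1, 0);
}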
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes, int is_default_endian)
{
@@ -952,6 +1118,9 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
val = dp_to_sp(val);
/* Store the value at the lowest bytes in 'data'. */
if (!host_swabbed) {
switch (bytes) {
@@ -985,6 +1154,129 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
u32 dword_offset, word_offset;
union kvmppc_one_reg reg;
int vsx_offset = 0;
int copy_type = vcpu->arch.mmio_vsx_copy_type;
int result = 0;
switch (copy_type) {
case KVMPPC_VSX_COPY_DWORD:
vsx_offset =
kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
if (vsx_offset == -1) {
result = -1;
break;
}
if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
} else {
reg.vval = VCPU_VSX_VR(vcpu, rs);
*val = reg.vsxval[vsx_offset];
}
break;
case KVMPPC_VSX_COPY_WORD:
vsx_offset =
kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
if (vsx_offset == -1) {
result = -1;
break;
}
if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
dword_offset = vsx_offset / 2;
word_offset = vsx_offset % 2;
reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
*val = reg.vsx32val[word_offset];
} else {
reg.vval = VCPU_VSX_VR(vcpu, rs);
*val = reg.vsx32val[vsx_offset];
}
break;
default:
result = -1;
break;
}
return result;
}
int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
int rs, unsigned int bytes, int is_default_endian)
{
u64 val;
enum emulation_result emulated = EMULATE_DONE;
vcpu->arch.io_gpr = rs;
/* Currently, mmio_vsx_copy_nums is limited to at most 4 copies */
if ((vcpu->arch.mmio_vsx_copy_nums > 4) ||
    (vcpu->arch.mmio_vsx_copy_nums < 0)) {
return EMULATE_FAIL;
}
while (vcpu->arch.mmio_vsx_copy_nums) {
if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
return EMULATE_FAIL;
emulated = kvmppc_handle_store(run, vcpu,
val, bytes, is_default_endian);
if (emulated != EMULATE_DONE)
break;
vcpu->arch.paddr_accessed += run->mmio.len;
vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++;
}
return emulated;
}
static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
enum emulation_result emulated = EMULATE_FAIL;
int r;
vcpu->arch.paddr_accessed += run->mmio.len;
if (!vcpu->mmio_is_write) {
emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
} else {
emulated = kvmppc_handle_vsx_store(run, vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1);
}
switch (emulated) {
case EMULATE_DO_MMIO:
run->exit_reason = KVM_EXIT_MMIO;
r = RESUME_HOST;
break;
case EMULATE_FAIL:
pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
r = RESUME_HOST;
break;
default:
r = RESUME_GUEST;
break;
}
return r;
}
#endif /* CONFIG_VSX */
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
int r = 0;
@@ -1087,13 +1379,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
int r;
sigset_t sigsaved;
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
if (vcpu->mmio_needed) {
vcpu->mmio_needed = 0;
if (!vcpu->mmio_is_write)
kvmppc_complete_mmio_load(vcpu, run);
vcpu->mmio_needed = 0;
#ifdef CONFIG_VSX
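/*
 * A VSX access is emulated as mmio_vsx_copy_nums separate MMIO
 * copies; userspace has just completed one of them, so advance
 * to the next copy and re-issue it if any remain.
 */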
if (vcpu->arch.mmio_vsx_copy_nums > 0) {
vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++;
}
if (vcpu->arch.mmio_vsx_copy_nums > 0) {
r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
if (r == RESUME_HOST) {
vcpu->mmio_needed = 1;
return r;
}
}
#endif
} else if (vcpu->arch.osi_needed) {
u64 *gprs = run->osi.gprs;
int i;
@@ -1115,6 +1418,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
#endif
}
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
if (run->immediate_exit)
r = -EINTR;
else
...