Commit da2a32b8 authored by Simon Guo's avatar Simon Guo Committed by Paul Mackerras

KVM: PPC: Expand mmio_vsx_copy_type to cover VMX load/store element types

VSX MMIO emulation uses mmio_vsx_copy_type to represent VSX emulated
element size/type, such as KVMPPC_VSX_COPY_DWORD_LOAD, etc. This
patch expands mmio_vsx_copy_type to cover VMX copy type, such as
KVMPPC_VMX_COPY_BYTE(stvebx/lvebx), etc. As a result,
mmio_vsx_copy_type is also renamed to mmio_copy_type.

It is a preparation for reimplementing VMX MMIO emulation.
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent b01c78c2
...@@ -455,6 +455,11 @@ struct mmio_hpte_cache { ...@@ -455,6 +455,11 @@ struct mmio_hpte_cache {
#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3 #define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3
#define KVMPPC_VSX_COPY_WORD_LOAD_DUMP 4 #define KVMPPC_VSX_COPY_WORD_LOAD_DUMP 4
#define KVMPPC_VMX_COPY_BYTE 8
#define KVMPPC_VMX_COPY_HWORD 9
#define KVMPPC_VMX_COPY_WORD 10
#define KVMPPC_VMX_COPY_DWORD 11
struct openpic; struct openpic;
/* W0 and W1 of a XIVE thread management context */ /* W0 and W1 of a XIVE thread management context */
...@@ -677,16 +682,16 @@ struct kvm_vcpu_arch { ...@@ -677,16 +682,16 @@ struct kvm_vcpu_arch {
* Number of simulations for vsx. * Number of simulations for vsx.
* If we use 2*8bytes to simulate 1*16bytes, * If we use 2*8bytes to simulate 1*16bytes,
* then the number should be 2 and * then the number should be 2 and
* mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD. * mmio_copy_type=KVMPPC_VSX_COPY_DWORD.
* If we use 4*4bytes to simulate 1*16bytes, * If we use 4*4bytes to simulate 1*16bytes,
* the number should be 4 and * the number should be 4 and
* mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD. * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
*/ */
u8 mmio_vsx_copy_nums; u8 mmio_vsx_copy_nums;
u8 mmio_vsx_offset; u8 mmio_vsx_offset;
u8 mmio_vsx_copy_type;
u8 mmio_vsx_tx_sx_enabled; u8 mmio_vsx_tx_sx_enabled;
u8 mmio_vmx_copy_nums; u8 mmio_vmx_copy_nums;
u8 mmio_copy_type;
u8 osi_needed; u8 osi_needed;
u8 osi_enabled; u8 osi_enabled;
u8 papr_enabled; u8 papr_enabled;
......
...@@ -109,7 +109,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -109,7 +109,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst); vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
vcpu->arch.mmio_vsx_copy_nums = 0; vcpu->arch.mmio_vsx_copy_nums = 0;
vcpu->arch.mmio_vsx_offset = 0; vcpu->arch.mmio_vsx_offset = 0;
vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE; vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
vcpu->arch.mmio_sp64_extend = 0; vcpu->arch.mmio_sp64_extend = 0;
vcpu->arch.mmio_sign_extend = 0; vcpu->arch.mmio_sign_extend = 0;
vcpu->arch.mmio_vmx_copy_nums = 0; vcpu->arch.mmio_vmx_copy_nums = 0;
...@@ -175,17 +175,17 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -175,17 +175,17 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (op.element_size == 8) { if (op.element_size == 8) {
if (op.vsx_flags & VSX_SPLAT) if (op.vsx_flags & VSX_SPLAT)
vcpu->arch.mmio_vsx_copy_type = vcpu->arch.mmio_copy_type =
KVMPPC_VSX_COPY_DWORD_LOAD_DUMP; KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
else else
vcpu->arch.mmio_vsx_copy_type = vcpu->arch.mmio_copy_type =
KVMPPC_VSX_COPY_DWORD; KVMPPC_VSX_COPY_DWORD;
} else if (op.element_size == 4) { } else if (op.element_size == 4) {
if (op.vsx_flags & VSX_SPLAT) if (op.vsx_flags & VSX_SPLAT)
vcpu->arch.mmio_vsx_copy_type = vcpu->arch.mmio_copy_type =
KVMPPC_VSX_COPY_WORD_LOAD_DUMP; KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
else else
vcpu->arch.mmio_vsx_copy_type = vcpu->arch.mmio_copy_type =
KVMPPC_VSX_COPY_WORD; KVMPPC_VSX_COPY_WORD;
} else } else
break; break;
...@@ -261,10 +261,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -261,10 +261,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_sp64_extend = 1; vcpu->arch.mmio_sp64_extend = 1;
if (op.element_size == 8) if (op.element_size == 8)
vcpu->arch.mmio_vsx_copy_type = vcpu->arch.mmio_copy_type =
KVMPPC_VSX_COPY_DWORD; KVMPPC_VSX_COPY_DWORD;
else if (op.element_size == 4) else if (op.element_size == 4)
vcpu->arch.mmio_vsx_copy_type = vcpu->arch.mmio_copy_type =
KVMPPC_VSX_COPY_WORD; KVMPPC_VSX_COPY_WORD;
else else
break; break;
......
...@@ -1080,14 +1080,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, ...@@ -1080,14 +1080,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
if (vcpu->kvm->arch.kvm_ops->giveup_ext) if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD) if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
kvmppc_set_vsr_dword(vcpu, gpr); kvmppc_set_vsr_dword(vcpu, gpr);
else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD) else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
kvmppc_set_vsr_word(vcpu, gpr); kvmppc_set_vsr_word(vcpu, gpr);
else if (vcpu->arch.mmio_vsx_copy_type == else if (vcpu->arch.mmio_copy_type ==
KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
kvmppc_set_vsr_dword_dump(vcpu, gpr); kvmppc_set_vsr_dword_dump(vcpu, gpr);
else if (vcpu->arch.mmio_vsx_copy_type == else if (vcpu->arch.mmio_copy_type ==
KVMPPC_VSX_COPY_WORD_LOAD_DUMP) KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
kvmppc_set_vsr_word_dump(vcpu, gpr); kvmppc_set_vsr_word_dump(vcpu, gpr);
break; break;
...@@ -1260,7 +1260,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) ...@@ -1260,7 +1260,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
u32 dword_offset, word_offset; u32 dword_offset, word_offset;
union kvmppc_one_reg reg; union kvmppc_one_reg reg;
int vsx_offset = 0; int vsx_offset = 0;
int copy_type = vcpu->arch.mmio_vsx_copy_type; int copy_type = vcpu->arch.mmio_copy_type;
int result = 0; int result = 0;
switch (copy_type) { switch (copy_type) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment