Commit 3a1174cd authored by Paolo Bonzini


Merge tag 'kvm-ppc-next-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD

PPC KVM update for 4.19.

This update adds no new features; it just has some minor code cleanups
and bug fixes, including a fix to allow us to create KVM_MAX_VCPUS
vCPUs on POWER9 in all CPU threading modes.
parents 6f0d349d b5c6f760
@@ -390,4 +390,51 @@ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
 #define SPLIT_HACK_MASK		0xff000000
 #define SPLIT_HACK_OFFS		0xfb000000
 
+/*
+ * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
+ * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
+ * (but not its actual threading mode, which is not available) to avoid
+ * collisions.
+ *
+ * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
+ * 0) unchanged: if the guest is filling each VCORE completely then it will be
+ * using consecutive IDs and it will fill the space without any packing.
+ *
+ * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
+ * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
+ * added to avoid collisions.
+ *
+ * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
+ * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
+ * can be safely packed into the second half of each VCORE by adding an offset
+ * of (stride / 2).
+ *
+ * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
+ * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
+ * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
+ *
+ * Finally, VCPU IDs from blocks 5..7 will only be seen if the guest is using a
+ * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
+ * must be free to use.
+ *
+ * (The offsets for each block are stored in block_offsets[], indexed by the
+ * block number if the stride is 8. For cases where the guest's stride is less
+ * than 8, we can re-use the block_offsets array by multiplying the block
+ * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
+ */
+static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
+{
+	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
+	int stride = kvm->arch.emul_smt_mode;
+	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
+	u32 packed_id;
+
+	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
+		return 0;
+	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
+	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
+		return 0;
+	return packed_id;
+}
+
 #endif /* __ASM_KVM_BOOK3S_H__ */
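To make the packing concrete, here is a minimal user-space sketch of the same arithmetic with stand-in constants (MAX_SMT_THREADS = 8 as on Book3S HV; KVM_MAX_VCPUS is a hypothetical 1024 here, the real value comes from the kernel headers), with the WARN_ONCE() sanity checks omitted:

```c
#include <stdio.h>

/* Stand-ins for the kernel constants; the values here are illustrative only. */
#define MAX_SMT_THREADS	8
#define KVM_MAX_VCPUS	1024

/* Same arithmetic as kvmppc_pack_vcpu_id(), minus the kvm struct. */
static unsigned int pack_vcpu_id(unsigned int id, int emul_smt_mode)
{
	static const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);

	return (id % KVM_MAX_VCPUS) + block_offsets[block];
}

int main(void)
{
	/* emul_smt_mode = 4: IDs up to 4 * KVM_MAX_VCPUS are allowed, so
	 * blocks 0..3 can occur; block N indexes block_offsets[N * 2]. */
	printf("%u\n", pack_vcpu_id(4, 4));                     /* block 0: 4 + 0 = 4 */
	printf("%u\n", pack_vcpu_id(KVM_MAX_VCPUS + 4, 4));     /* block 1: 4 + 2 = 6 */
	printf("%u\n", pack_vcpu_id(2 * KVM_MAX_VCPUS + 4, 4)); /* block 2: 4 + 1 = 5 */
	printf("%u\n", pack_vcpu_id(3 * KVM_MAX_VCPUS + 4, 4)); /* block 3: 4 + 3 = 7 */
	return 0;
}
```

Without the offsets the four IDs above would all collide at 4; with them they land on 4, 6, 5 and 7, i.e. the four threads of the same, otherwise half-empty, stride-4 VCORE.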
@@ -42,7 +42,14 @@
 #define KVM_USER_MEM_SLOTS	512
 
 #include <asm/cputhreads.h>
-#define KVM_MAX_VCPU_ID		(threads_per_subcore * KVM_MAX_VCORES)
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#include <asm/kvm_book3s_asm.h>		/* for MAX_SMT_THREADS */
+#define KVM_MAX_VCPU_ID		(MAX_SMT_THREADS * KVM_MAX_VCORES)
+#else
+#define KVM_MAX_VCPU_ID		KVM_MAX_VCPUS
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -672,7 +679,7 @@ struct kvm_vcpu_arch {
 	gva_t vaddr_accessed;
 	pgd_t *pgdir;
 
-	u8 io_gpr; /* GPR used as IO source/target */
+	u16 io_gpr; /* GPR used as IO source/target */
 	u8 mmio_host_swabbed;
 	u8 mmio_sign_extend;
 	/* conversion between single and double precision */
@@ -688,7 +695,6 @@ struct kvm_vcpu_arch {
 	 */
 	u8 mmio_vsx_copy_nums;
 	u8 mmio_vsx_offset;
-	u8 mmio_vsx_tx_sx_enabled;
 	u8 mmio_vmx_copy_nums;
 	u8 mmio_vmx_offset;
 	u8 mmio_copy_type;
@@ -801,14 +807,14 @@ struct kvm_vcpu_arch {
 #define KVMPPC_VCPU_BUSY_IN_HOST	2
 
 /* Values for vcpu->arch.io_gpr */
-#define KVM_MMIO_REG_MASK	0x001f
-#define KVM_MMIO_REG_EXT_MASK	0xffe0
+#define KVM_MMIO_REG_MASK	0x003f
+#define KVM_MMIO_REG_EXT_MASK	0xffc0
 #define KVM_MMIO_REG_GPR	0x0000
-#define KVM_MMIO_REG_FPR	0x0020
-#define KVM_MMIO_REG_QPR	0x0040
-#define KVM_MMIO_REG_FQPR	0x0060
-#define KVM_MMIO_REG_VSX	0x0080
-#define KVM_MMIO_REG_VMX	0x00c0
+#define KVM_MMIO_REG_FPR	0x0040
+#define KVM_MMIO_REG_QPR	0x0080
+#define KVM_MMIO_REG_FQPR	0x00c0
+#define KVM_MMIO_REG_VSX	0x0100
+#define KVM_MMIO_REG_VMX	0x0180
 
 #define __KVM_HAVE_ARCH_WQP
 #define __KVM_HAVE_CREATE_DEVICE
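The widened io_gpr encoding above (which is also why io_gpr grows from u8 to u16) gives each register class 64 slots instead of 32, so a full 6-bit VSX register number (0..63) can be carried in one value. A small stand-alone sketch of how encode/decode works with the new masks; the helper logic and the printout are illustrative, not kernel code:

```c
#include <stdint.h>
#include <stdio.h>

/* New values from the hunk above. */
#define KVM_MMIO_REG_MASK	0x003f
#define KVM_MMIO_REG_EXT_MASK	0xffc0
#define KVM_MMIO_REG_VSX	0x0100

int main(void)
{
	/* VSX register 45 (VSR[45], which overlays VR[13]) now encodes
	 * without losing its upper bit, because each class spans 64 slots. */
	uint16_t io_gpr = KVM_MMIO_REG_VSX | 45;

	unsigned int index = io_gpr & KVM_MMIO_REG_MASK;          /* 45 */
	unsigned int reg_class = io_gpr & KVM_MMIO_REG_EXT_MASK;  /* 0x0100 = VSX */

	printf("class=0x%x index=%u\n", reg_class, index);

	/* With the old 5-bit mask (0x001f) the same register number would
	 * have been truncated to 13, which is why op.reg used to be masked
	 * with 0x1f and a separate mmio_vsx_tx_sx_enabled flag was needed. */
	return 0;
}
```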
@@ -161,7 +161,7 @@
 #define PSSCR_ESL		0x00200000 /* Enable State Loss */
 #define PSSCR_SD		0x00400000 /* Status Disable */
 #define PSSCR_PLS		0xf000000000000000 /* Power-saving Level Status */
-#define PSSCR_GUEST_VIS		0xf0000000000003ff /* Guest-visible PSSCR fields */
+#define PSSCR_GUEST_VIS		0xf0000000000003ffUL /* Guest-visible PSSCR fields */
 #define PSSCR_FAKE_SUSPEND	0x00000400 /* Fake-suspend bit (P9 DD2.2) */
 #define PSSCR_FAKE_SUSPEND_LG	10	   /* Fake-suspend bit position */
@@ -180,7 +180,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
 		if ((tbltmp->it_page_shift <= stt->page_shift) &&
 		    (tbltmp->it_offset << tbltmp->it_page_shift ==
 		     stt->offset << stt->page_shift) &&
-		    (tbltmp->it_size << tbltmp->it_page_shift ==
+		    (tbltmp->it_size << tbltmp->it_page_shift >=
 		     stt->size << stt->page_shift)) {
 			/*
 			 * Reference the table to avoid races with
@@ -296,7 +296,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 {
 	struct kvmppc_spapr_tce_table *stt = NULL;
 	struct kvmppc_spapr_tce_table *siter;
-	unsigned long npages, size;
+	unsigned long npages, size = args->size;
 	int ret = -ENOMEM;
 	int i;
@@ -304,7 +304,6 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 	    (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
 		return -EINVAL;
 
-	size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
 	npages = kvmppc_tce_pages(size);
 	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
 	if (ret)
@@ -128,14 +128,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
  * and SPURR count and should be set according to the number of
  * online threads in the vcore being run.
  */
-#define RWMR_RPA_P8_1THREAD	0x164520C62609AECA
-#define RWMR_RPA_P8_2THREAD	0x7FFF2908450D8DA9
-#define RWMR_RPA_P8_3THREAD	0x164520C62609AECA
-#define RWMR_RPA_P8_4THREAD	0x199A421245058DA9
-#define RWMR_RPA_P8_5THREAD	0x164520C62609AECA
-#define RWMR_RPA_P8_6THREAD	0x164520C62609AECA
-#define RWMR_RPA_P8_7THREAD	0x164520C62609AECA
-#define RWMR_RPA_P8_8THREAD	0x164520C62609AECA
+#define RWMR_RPA_P8_1THREAD	0x164520C62609AECAUL
+#define RWMR_RPA_P8_2THREAD	0x7FFF2908450D8DA9UL
+#define RWMR_RPA_P8_3THREAD	0x164520C62609AECAUL
+#define RWMR_RPA_P8_4THREAD	0x199A421245058DA9UL
+#define RWMR_RPA_P8_5THREAD	0x164520C62609AECAUL
+#define RWMR_RPA_P8_6THREAD	0x164520C62609AECAUL
+#define RWMR_RPA_P8_7THREAD	0x164520C62609AECAUL
+#define RWMR_RPA_P8_8THREAD	0x164520C62609AECAUL
 
 static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
 	RWMR_RPA_P8_1THREAD,
@@ -1816,7 +1816,7 @@ static int threads_per_vcore(struct kvm *kvm)
 	return threads_per_subcore;
 }
 
-static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
+static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
 {
 	struct kvmppc_vcore *vcore;
@@ -1830,7 +1830,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 	init_swait_queue_head(&vcore->wq);
 	vcore->preempt_tb = TB_NIL;
 	vcore->lpcr = kvm->arch.lpcr;
-	vcore->first_vcpuid = core * kvm->arch.smt_mode;
+	vcore->first_vcpuid = id;
 	vcore->kvm = kvm;
 	INIT_LIST_HEAD(&vcore->preempt_list);
@@ -2048,12 +2048,26 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	mutex_lock(&kvm->lock);
 	vcore = NULL;
 	err = -EINVAL;
-	core = id / kvm->arch.smt_mode;
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
+			pr_devel("KVM: VCPU ID too high\n");
+			core = KVM_MAX_VCORES;
+		} else {
+			BUG_ON(kvm->arch.smt_mode != 1);
+			core = kvmppc_pack_vcpu_id(kvm, id);
+		}
+	} else {
+		core = id / kvm->arch.smt_mode;
+	}
 	if (core < KVM_MAX_VCORES) {
 		vcore = kvm->arch.vcores[core];
-		if (!vcore) {
+		if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
+			pr_devel("KVM: collision on id %u", id);
+			vcore = NULL;
+		} else if (!vcore) {
 			err = -ENOMEM;
-			vcore = kvmppc_vcore_create(kvm, core);
+			vcore = kvmppc_vcore_create(kvm,
+						    id & ~(kvm->arch.smt_mode - 1));
 			kvm->arch.vcores[core] = vcore;
 			kvm->arch.online_vcores++;
 		}
@@ -4561,6 +4575,8 @@ static int kvmppc_book3s_init_hv(void)
 			pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
 			return -ENODEV;
 		}
+		/* presence of intc confirmed - node can be dropped again */
+		of_node_put(np);
 	}
 #endif
@@ -317,6 +317,11 @@ static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
 	return -EBUSY;
 }
 
+static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
+{
+	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
+}
+
 static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
 			     struct kvmppc_xive_src_block *sb,
 			     struct kvmppc_xive_irq_state *state)
@@ -362,7 +367,7 @@ static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
 	 */
 	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
 		xive_native_configure_irq(hw_num,
-					  xive->vp_base + state->act_server,
+					  xive_vp(xive, state->act_server),
 					  MASKED, state->number);
 		/* set old_p so we can track if an H_EOI was done */
 		state->old_p = true;
@@ -418,7 +423,7 @@ static void xive_finish_unmask(struct kvmppc_xive *xive,
 	 */
 	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
 		xive_native_configure_irq(hw_num,
-					  xive->vp_base + state->act_server,
+					  xive_vp(xive, state->act_server),
 					  state->act_priority, state->number);
 		/* If an EOI is needed, do it here */
 		if (!state->old_p)
@@ -495,7 +500,7 @@ static int xive_target_interrupt(struct kvm *kvm,
 	kvmppc_xive_select_irq(state, &hw_num, NULL);
 
 	return xive_native_configure_irq(hw_num,
-					 xive->vp_base + server,
+					 xive_vp(xive, server),
 					 prio, state->number);
 }
@@ -883,7 +888,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
 	 * which is fine for a never started interrupt.
 	 */
 	xive_native_configure_irq(hw_irq,
-				  xive->vp_base + state->act_server,
+				  xive_vp(xive, state->act_server),
 				  state->act_priority, state->number);
 
 	/*
@@ -959,7 +964,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
 	/* Reconfigure the IPI */
 	xive_native_configure_irq(state->ipi_number,
-				  xive->vp_base + state->act_server,
+				  xive_vp(xive, state->act_server),
 				  state->act_priority, state->number);
 
 	/*
@@ -1084,7 +1089,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
 		pr_devel("Duplicate !\n");
 		return -EEXIST;
 	}
-	if (cpu >= KVM_MAX_VCPUS) {
+	if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
 		pr_devel("Out of bounds !\n");
 		return -EINVAL;
 	}
@@ -1098,7 +1103,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
 	xc->xive = xive;
 	xc->vcpu = vcpu;
 	xc->server_num = cpu;
-	xc->vp_id = xive->vp_base + cpu;
+	xc->vp_id = xive_vp(xive, cpu);
 	xc->mfrr = 0xff;
 	xc->valid = true;
@@ -106,7 +106,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
 	 * VSR[32..63] and memory
 	 */
-	vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
 	vcpu->arch.mmio_vsx_copy_nums = 0;
 	vcpu->arch.mmio_vsx_offset = 0;
 	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
@@ -242,8 +241,8 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 			}
 
 			emulated = kvmppc_handle_vsx_load(run, vcpu,
-					KVM_MMIO_REG_VSX | (op.reg & 0x1f),
-					io_size_each, 1, op.type & SIGNEXT);
+					KVM_MMIO_REG_VSX|op.reg, io_size_each,
+					1, op.type & SIGNEXT);
 			break;
 		}
 #endif
@@ -363,7 +362,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 			}
 
 			emulated = kvmppc_handle_vsx_store(run, vcpu,
-					op.reg & 0x1f, io_size_each, 1);
+					op.reg, io_size_each, 1);
 			break;
 		}
 #endif
@@ -880,10 +880,10 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
 	if (offset == -1)
 		return;
 
-	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
-		val.vval = VCPU_VSX_VR(vcpu, index);
+	if (index >= 32) {
+		val.vval = VCPU_VSX_VR(vcpu, index - 32);
 		val.vsxval[offset] = gpr;
-		VCPU_VSX_VR(vcpu, index) = val.vval;
+		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
 		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
 	}
@@ -895,11 +895,11 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
 	union kvmppc_one_reg val;
 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 
-	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
-		val.vval = VCPU_VSX_VR(vcpu, index);
+	if (index >= 32) {
+		val.vval = VCPU_VSX_VR(vcpu, index - 32);
 		val.vsxval[0] = gpr;
 		val.vsxval[1] = gpr;
-		VCPU_VSX_VR(vcpu, index) = val.vval;
+		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
 		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
 		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
@@ -912,12 +912,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
 	union kvmppc_one_reg val;
 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 
-	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+	if (index >= 32) {
 		val.vsx32val[0] = gpr;
 		val.vsx32val[1] = gpr;
 		val.vsx32val[2] = gpr;
 		val.vsx32val[3] = gpr;
-		VCPU_VSX_VR(vcpu, index) = val.vval;
+		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
 		val.vsx32val[0] = gpr;
 		val.vsx32val[1] = gpr;
@@ -937,10 +937,10 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 	if (offset == -1)
 		return;
 
-	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
-		val.vval = VCPU_VSX_VR(vcpu, index);
+	if (index >= 32) {
+		val.vval = VCPU_VSX_VR(vcpu, index - 32);
 		val.vsx32val[offset] = gpr32;
-		VCPU_VSX_VR(vcpu, index) = val.vval;
+		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
 		dword_offset = offset / 2;
 		word_offset = offset % 2;
@@ -1361,10 +1361,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 			break;
 		}
 
-		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+		if (rs < 32) {
 			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
 		} else {
-			reg.vval = VCPU_VSX_VR(vcpu, rs);
+			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
 			*val = reg.vsxval[vsx_offset];
 		}
 		break;
@@ -1378,13 +1378,13 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 			break;
 		}
 
-		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+		if (rs < 32) {
 			dword_offset = vsx_offset / 2;
 			word_offset = vsx_offset % 2;
 			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
 			*val = reg.vsx32val[word_offset];
 		} else {
-			reg.vval = VCPU_VSX_VR(vcpu, rs);
+			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
 			*val = reg.vsx32val[vsx_offset];
 		}
 		break;
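With the mmio_vsx_tx_sx_enabled flag gone, the emulation code above keys everything off the register number itself: indices 0..31 go to the FP/VSX doubleword file and 32..63 to the vector registers. A hedged sketch of that dispatch, with toy storage standing in for the kernel's VCPU_VSX_FPR/VCPU_VSX_VR accessors:

```c
#include <stdint.h>
#include <stdio.h>

/* Toy register file; in the kernel these live in the vcpu's fp/vr state. */
static uint64_t fpr[32][2];	/* VSR[0..31]:  FP/VSX doublewords */
static uint64_t vr[32][2];	/* VSR[32..63]: vector registers   */

/* Mirrors the "index >= 32" split used by kvmppc_set_vsr_dword() above. */
static void set_vsr_dword(int index, int offset, uint64_t gpr)
{
	if (index >= 32)
		vr[index - 32][offset] = gpr;	/* upper half of the VSX space */
	else
		fpr[index][offset] = gpr;	/* lower half of the VSX space */
}

int main(void)
{
	set_vsr_dword(13, 0, 0x1111);	/* VSR 13 -> FPR file            */
	set_vsr_dword(45, 1, 0x2222);	/* VSR 45 -> VR 13, doubleword 1 */
	printf("%llx %llx\n",
	       (unsigned long long)fpr[13][0],
	       (unsigned long long)vr[13][1]);
	return 0;
}
```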