Commit c7f38f46 authored by Alexander Graf, committed by Avi Kivity

KVM: PPC: Improve indirect svcpu accessors

We already have some inline functions we use to access vcpu or svcpu structs,
depending on whether we're on booke or book3s. Since we just put a few more
registers into the svcpu, we also need to make sure the respective callbacks
are available and get used.

So this patch moves direct use of the fields that now live in the svcpu struct
over to inline function calls. While at it, it also moves the definitions of those
inline functions into the respective header files for booke and book3s, greatly
improving readability.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 66bb1706
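For orientation, the accessor pattern this patch relies on looks roughly like this
(a minimal sketch of the two flavours, not the literal patch contents; the full
definitions are in the hunks below). On book3s the first 14 GPRs are kept in the
shadow vcpu, so the accessor goes through to_svcpu(); on booke everything simply
stays in vcpu->arch:

	/* asm/kvm_book3s.h flavour: r0..r13 live in the shadow vcpu */
	static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
	{
		if (num < 14)
			return to_svcpu(vcpu)->gpr[num];
		else
			return vcpu->arch.gpr[num];
	}

	/* asm/kvm_booke.h flavour: registers live directly in vcpu->arch */
	static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
	{
		return vcpu->arch.gpr[num];
	}

Common code then only ever calls kvmppc_get_gpr(), kvmppc_get_pc() and friends,
and picks up whichever definition the subarch header provides.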
@@ -71,7 +71,7 @@ struct kvmppc_sid_map {
struct kvmppc_vcpu_book3s {
	struct kvm_vcpu vcpu;
-	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct kvmppc_slb slb[64];
	struct {
@@ -147,6 +147,94 @@ static inline ulong dsisr(void)
}
extern void kvm_return_point(void);
static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
if ( num < 14 ) {
to_svcpu(vcpu)->gpr[num] = val;
to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
} else
vcpu->arch.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
if ( num < 14 )
return to_svcpu(vcpu)->gpr[num];
else
return vcpu->arch.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
to_svcpu(vcpu)->cr = val;
to_book3s(vcpu)->shadow_vcpu->cr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
return to_svcpu(vcpu)->cr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
to_svcpu(vcpu)->xer = val;
to_book3s(vcpu)->shadow_vcpu->xer = val;
}
static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return to_svcpu(vcpu)->xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
to_svcpu(vcpu)->ctr = val;
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
return to_svcpu(vcpu)->ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
to_svcpu(vcpu)->lr = val;
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
return to_svcpu(vcpu)->lr;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
to_svcpu(vcpu)->pc = val;
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
return to_svcpu(vcpu)->pc;
}
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
ulong pc = kvmppc_get_pc(vcpu);
struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
/* Load the instruction manually if it failed to do so in the
* exit path */
if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
return svcpu->last_inst;
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return to_svcpu(vcpu)->fault_dar;
}
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
@@ -155,4 +243,12 @@ extern void kvm_return_point(void);
#define INS_DCBZ	0x7c0007ec
/* Also add subarch specific defines */
#ifdef CONFIG_PPC_BOOK3S_32
#include <asm/kvm_book3s_32.h>
#else
#include <asm/kvm_book3s_64.h>
#endif
#endif /* __ASM_KVM_BOOK3S_H__ */
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright SUSE Linux Products GmbH 2010
*
* Authors: Alexander Graf <agraf@suse.de>
*/
#ifndef __ASM_KVM_BOOKE_H__
#define __ASM_KVM_BOOKE_H__
#include <linux/types.h>
#include <linux/kvm_host.h>
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
return vcpu->arch.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.cr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.cr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.xer = val;
}
static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
}
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
return vcpu->arch.last_inst;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.ctr = val;
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.lr = val;
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.lr;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.pc = val;
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
return vcpu->arch.pc;
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault_dear;
}
#endif /* __ASM_KVM_BOOKE_H__ */
@@ -30,6 +30,8 @@
#include <linux/kvm_host.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
+#else
+#include <asm/kvm_booke.h>
#endif

enum emulation_result {
@@ -138,81 +140,4 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
	return r;
}
#ifdef CONFIG_PPC_BOOK3S
/* We assume we're always acting on the current vcpu */
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
if ( num < 14 ) {
get_paca()->shadow_vcpu.gpr[num] = val;
to_book3s(vcpu)->shadow_vcpu.gpr[num] = val;
} else
vcpu->arch.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
if ( num < 14 )
return get_paca()->shadow_vcpu.gpr[num];
else
return vcpu->arch.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
get_paca()->shadow_vcpu.cr = val;
to_book3s(vcpu)->shadow_vcpu.cr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
return get_paca()->shadow_vcpu.cr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
get_paca()->shadow_vcpu.xer = val;
to_book3s(vcpu)->shadow_vcpu.xer = val;
}
static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return get_paca()->shadow_vcpu.xer;
}
#else
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
return vcpu->arch.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.cr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.cr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.xer = val;
}
static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
}
#endif
#endif /* __POWERPC_KVM_PPC_H__ */
@@ -71,18 +71,26 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
-	memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
-	memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
		sizeof(get_paca()->shadow_vcpu));
-	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
+	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
-	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
-	memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
		sizeof(get_paca()->shadow_vcpu));
-	to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
@@ -144,7 +152,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
			   VSID_SPLIT_MASK);

		kvmppc_mmu_flush_segments(vcpu);
-		kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}

	/* Preload FPU if it's enabled */
@@ -154,9 +162,9 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
-	vcpu->arch.srr0 = vcpu->arch.pc;
+	vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
	vcpu->arch.srr1 = vcpu->arch.msr | flags;
-	vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
+	kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}
@@ -551,20 +559,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
-		vcpu->arch.dear = vcpu->arch.fault_dear;
-		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
-		vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
+		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
-		vcpu->arch.dear = vcpu->arch.fault_dear;
-		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-		vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
-		vcpu->arch.dear = vcpu->arch.fault_dear;
+		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -646,10 +654,11 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
-	ulong srr0 = vcpu->arch.pc;
+	ulong srr0 = kvmppc_get_pc(vcpu);
+	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

-	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &vcpu->arch.last_inst, false);
+	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
@@ -754,12 +763,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	run->ready_for_interrupt_injection = 1;
#ifdef EXIT_DEBUG
	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
-		exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
-		kvmppc_get_dec(vcpu), vcpu->arch.msr);
+		exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
+		kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
#elif defined (EXIT_DEBUG_SIMPLE)
	if ((exit_nr != 0x900) && (exit_nr != 0x500))
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
-			exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
+			exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
			vcpu->arch.msr);
#endif
	kvm_resched(vcpu);
@@ -767,8 +776,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;
		/* only care about PTEG not found errors, but leave NX alone */
-		if (vcpu->arch.shadow_srr1 & 0x40000000) {
-			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
+		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -777,38 +786,41 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			 * so we can't use the NX bit inside the guest. Let's cross our fingers,
			 * that no guest that needs the dcbz hack does NX.
			 */
-			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
			r = RESUME_GUEST;
		} else {
-			vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
+			vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
+	{
+		ulong dar = kvmppc_get_fault_dar(vcpu);
		vcpu->stat.pf_storage++;
		/* The only case we need to handle is missing shadow PTEs */
-		if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
-			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
+		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
-			vcpu->arch.dear = vcpu->arch.fault_dear;
-			to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
+			vcpu->arch.dear = dar;
+			to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
+	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
-		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
-			vcpu->arch.dear = vcpu->arch.fault_dear;
+		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
+			vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
-		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
+		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
@@ -829,13 +841,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		ulong flags;

program_interrupt:
-		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
+		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
-			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst);
+			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
-			if ((vcpu->arch.last_inst & 0xff0007ff) !=
+			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
@@ -854,7 +866,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
@@ -917,9 +929,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
-				vcpu->arch.last_inst);
+				kvmppc_get_last_inst(vcpu));
			vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
-				vcpu->arch.last_inst);
+				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
@@ -932,7 +944,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
-			exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
+			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
@@ -959,7 +971,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	}
#ifdef EXIT_DEBUG
-	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r);
+	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
#endif

	return r;
@@ -976,10 +988,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	vcpu_load(vcpu);

-	regs->pc = vcpu->arch.pc;
+	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
-	regs->ctr = vcpu->arch.ctr;
-	regs->lr = vcpu->arch.lr;
+	regs->ctr = kvmppc_get_ctr(vcpu);
+	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
@@ -1007,10 +1019,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	vcpu_load(vcpu);

-	vcpu->arch.pc = regs->pc;
+	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
-	vcpu->arch.ctr = regs->ctr;
-	vcpu->arch.lr = regs->lr;
+	kvmppc_set_ctr(vcpu, regs->ctr);
+	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
@@ -1157,19 +1169,23 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
-	int err;
+	int err = -ENOMEM;

	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
-	if (!vcpu_book3s) {
-		err = -ENOMEM;
+	if (!vcpu_book3s)
		goto out;
-	}

	memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));

+	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
+	if (!vcpu_book3s->shadow_vcpu)
+		goto free_vcpu;
+
	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
-		goto free_vcpu;
+		goto free_shadow_vcpu;

	vcpu->arch.host_retip = kvm_return_point;
	vcpu->arch.host_msr = mfmsr();
@@ -1188,7 +1204,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
	err = __init_new_context();
	if (err < 0)
-		goto free_vcpu;
+		goto free_shadow_vcpu;
	vcpu_book3s->context_id = err;

	vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
@@ -1197,6 +1213,8 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
	return vcpu;

+free_shadow_vcpu:
+	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
@@ -1209,6 +1227,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
	__destroy_context(vcpu_book3s->context_id);

	kvm_vcpu_uninit(vcpu);
+	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}
......
@@ -383,7 +383,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
	if (vcpu->arch.msr & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
-		kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}
......
@@ -331,14 +331,14 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
	int found_inval = -1;
	int r;

-	if (!get_paca()->kvm_slb_max)
-		get_paca()->kvm_slb_max = 1;
+	if (!to_svcpu(vcpu)->slb_max)
+		to_svcpu(vcpu)->slb_max = 1;

	/* Are we overwriting? */
-	for (i = 1; i < get_paca()->kvm_slb_max; i++) {
-		if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V))
+	for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
+		if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
			found_inval = i;
-		else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid)
+		else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
			return i;
	}
@@ -352,11 +352,11 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
-	if ((get_paca()->kvm_slb_max) == max_slb_size)
+	if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

-	r = get_paca()->kvm_slb_max;
-	get_paca()->kvm_slb_max++;
+	r = to_svcpu(vcpu)->slb_max;
+	to_svcpu(vcpu)->slb_max++;

	return r;
}
@@ -374,7 +374,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
-		get_paca()->kvm_slb[slb_index].esid = 0;
+		to_svcpu(vcpu)->slb[slb_index].esid = 0;
		return -ENOENT;
	}
@@ -388,8 +388,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

-	get_paca()->kvm_slb[slb_index].esid = slb_esid;
-	get_paca()->kvm_slb[slb_index].vsid = slb_vsid;
+	to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
+	to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;

	dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);
@@ -398,8 +398,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
-	get_paca()->kvm_slb_max = 1;
-	get_paca()->kvm_slb[0].esid = 0;
+	to_svcpu(vcpu)->slb_max = 1;
+	to_svcpu(vcpu)->slb[0].esid = 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
......
@@ -69,7 +69,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
	switch (get_xop(inst)) {
	case OP_19_XOP_RFID:
	case OP_19_XOP_RFI:
-		vcpu->arch.pc = vcpu->arch.srr0;
+		kvmppc_set_pc(vcpu, vcpu->arch.srr0);
		kvmppc_set_msr(vcpu, vcpu->arch.srr1);
		*advance = 0;
		break;
@@ -208,7 +208,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				vcpu->arch.dear = vaddr;
-				vcpu->arch.fault_dear = vaddr;
+				to_svcpu(vcpu)->fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
@@ -217,7 +217,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
					dsisr |= DSISR_PROTFAULT;

				to_book3s(vcpu)->dsisr = dsisr;
-				vcpu->arch.fault_dsisr = dsisr;
+				to_svcpu(vcpu)->fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
......
@@ -656,7 +656,7 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
-	u32 inst = vcpu->arch.last_inst;
+	u32 inst = kvmppc_get_last_inst(vcpu);
	enum emulation_result emulated = EMULATE_DONE;
	int ax_rd = inst_get_field(inst, 6, 10);
......
@@ -132,7 +132,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
-	u32 inst = vcpu->arch.last_inst;
+	u32 inst = kvmppc_get_last_inst(vcpu);
	u32 ea;
	int ra;
	int rb;
@@ -516,10 +516,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
		}
	}

-	trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);
+	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

+	/* Advance past emulated instruction. */
	if (advance)
-		vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
} }
@@ -70,7 +70,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
-		       vcpu->arch.last_inst);
+		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
......