Commit 7e57cba0 authored by Alexander Graf, committed by Marcelo Tosatti

KVM: PPC: Use PACA backed shadow vcpu

We're being horribly racy right now. All the entry and exit code hijacks
random fields from the PACA that could easily be used by different code in
case we get interrupted, for example by a #MC or even a page fault.

After discussing this with Ben, we figured it's best to reserve some more
space in the PACA and just shove off some vcpu state to there.

That way we can drastically improve the readability of the code and make it
less racy and less complex.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 992b5b29
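
The change in outline: the patch reserves room in the PACA (the per-CPU area that r13 points at, even in real mode) for a shadow copy of the volatile guest state, and routes the register accessors through it. A condensed C sketch of the pieces the diff below adds; the comments here are editorial orientation, not from the original source:

	/* asm/kvm_book3s_64_asm.h: guest state that must survive real mode */
	struct kvmppc_book3s_shadow_vcpu {
		ulong gpr[14];		/* volatile GPRs r0-r13 */
		u32 cr;
		u32 xer;
		ulong host_r1;		/* host stack and TOC, restored on exit */
		ulong host_r2;
		ulong handler;
		ulong scratch0;		/* scratch slots for the trampolines */
		ulong scratch1;
		ulong vmhandler;	/* highmem (paged-mode) exit handler */
		ulong rmhandler;	/* real-mode entry handler */
	};

	/* asm/paca.h: embedded once per CPU */
	struct paca_struct {
		/* ... */
		struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
		/* ... */
	};

	/* asm/kvm_ppc.h: r0-r13 live in the PACA while the vcpu is loaded */
	static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
	{
		if (num < 14)
			return get_paca()->shadow_vcpu.gpr[num];
		else
			return vcpu->arch.gpr[num];
	}

The non-volatile GPRs (r14 and up) stay in vcpu->arch, and kvmppc_core_vcpu_load()/kvmppc_core_vcpu_put() memcpy the shadow state between the PACA and the vcpu so it survives rescheduling.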
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s_64_asm.h>
 
 struct kvmppc_slb {
 	u64 esid;
@@ -69,6 +70,7 @@ struct kvmppc_sid_map {
 
 struct kvmppc_vcpu_book3s {
 	struct kvm_vcpu vcpu;
+	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct kvmppc_slb slb[64];
 	struct {
...
@@ -20,6 +20,8 @@
 #ifndef __ASM_KVM_BOOK3S_ASM_H__
 #define __ASM_KVM_BOOK3S_ASM_H__
 
+#ifdef __ASSEMBLY__
+
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 
 #include <asm/kvm_asm.h>
@@ -55,4 +57,21 @@ kvmppc_resume_\intno:
 
 #endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
 
+#else /*__ASSEMBLY__ */
+
+struct kvmppc_book3s_shadow_vcpu {
+	ulong gpr[14];
+	u32 cr;
+	u32 xer;
+	ulong host_r1;
+	ulong host_r2;
+	ulong handler;
+	ulong scratch0;
+	ulong scratch1;
+	ulong vmhandler;
+	ulong rmhandler;
+};
+
+#endif /*__ASSEMBLY__ */
+
 #endif /* __ASM_KVM_BOOK3S_ASM_H__ */
@@ -175,10 +175,13 @@ struct kvm_vcpu_arch {
 	ulong gpr[32];
 
 	ulong pc;
-	u32 cr;
 	ulong ctr;
 	ulong lr;
+
+#ifdef CONFIG_BOOKE
 	ulong xer;
+	u32 cr;
+#endif
 
 	ulong msr;
 #ifdef CONFIG_PPC64
...
@@ -98,34 +98,42 @@ extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 
 #ifdef CONFIG_PPC_BOOK3S
 
+/* We assume we're always acting on the current vcpu */
+
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
-	vcpu->arch.gpr[num] = val;
+	if ( num < 14 )
+		get_paca()->shadow_vcpu.gpr[num] = val;
+	else
+		vcpu->arch.gpr[num] = val;
 }
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	return vcpu->arch.gpr[num];
+	if ( num < 14 )
+		return get_paca()->shadow_vcpu.gpr[num];
+	else
+		return vcpu->arch.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-	vcpu->arch.cr = val;
+	get_paca()->shadow_vcpu.cr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr;
+	return get_paca()->shadow_vcpu.cr;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
 {
-	vcpu->arch.xer = val;
+	get_paca()->shadow_vcpu.xer = val;
 }
 
 static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.xer;
+	return get_paca()->shadow_vcpu.xer;
 }
 
 #else
...
@@ -19,6 +19,9 @@
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/exception-64e.h>
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64_asm.h>
+#endif
 
 register struct paca_struct *local_paca asm("r13");
@@ -135,6 +138,8 @@ struct paca_struct {
 		u64 esid;
 		u64 vsid;
 	} kvm_slb[64];			/* guest SLB */
+	/* We use this to store guest state in */
+	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 	u8 kvm_slb_max;			/* highest used guest slb entry */
 	u8 kvm_in_guest;		/* are we inside the guest? */
 #endif
...
@@ -194,6 +194,32 @@ int main(void)
 	DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
 	DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
 	DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
+	DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
+	DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
+	DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
+	DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
+	DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
+	DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
+	DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
+	DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
+	DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
+	DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
+	DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
+	DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
+	DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
+	DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
+	DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
+	DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
+	DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
+	DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
+	DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
+					    shadow_vcpu.vmhandler));
+	DEFINE(PACA_KVM_RMHANDLER, offsetof(struct paca_struct,
+					    shadow_vcpu.rmhandler));
+	DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
+					    shadow_vcpu.scratch0));
+	DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
+					    shadow_vcpu.scratch1));
 #endif
 #endif /* CONFIG_PPC64 */
@@ -389,8 +415,6 @@ int main(void)
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
-	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
-	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
 	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
 	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
 	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
@@ -415,7 +439,10 @@ int main(void)
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
-#endif
+#else
+	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+#endif /* CONFIG_PPC64 */
 #endif
 
 #ifdef CONFIG_44x
 	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
...
@@ -66,12 +66,16 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
+	memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+	       sizeof(get_paca()->shadow_vcpu));
 	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
+	memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+	       sizeof(get_paca()->shadow_vcpu));
 	to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
 }
...
@@ -28,11 +28,6 @@
 #define ULONG_SIZE 8
 #define VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
 
-.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
-	ld	\tmp_reg, (PACA_EXMC+\offset)(r13)
-	std	\tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
-.endm
-
 .macro DISABLE_INTERRUPTS
 	mfmsr	r0
 	rldicl	r0,r0,48,1
@@ -92,37 +87,30 @@ kvm_start_entry:
 	/* Load non-volatile guest state from the vcpu */
 	VCPU_LOAD_NVGPRS(r4)
 
-kvm_start_lightweight:
-
-	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
-	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
-
-	DISABLE_INTERRUPTS
-
 	/* Save R1/R2 in the PACA */
-	std	r1, PACAR1(r13)
-	std	r2, (PACA_EXMC+EX_SRR0)(r13)
+	std	r1, PACA_KVM_HOST_R1(r13)
+	std	r2, PACA_KVM_HOST_R2(r13)
 
+	/* XXX swap in/out on load? */
 	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
-	std	r3, PACASAVEDMSR(r13)
+	std	r3, PACA_KVM_VMHANDLER(r13)
 
 	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
-	mtsrr0	r3
+	std	r3, PACA_KVM_RMHANDLER(r13)
 
-	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r3
-
-	/* Load guest state in the respective registers */
-	lwz	r3, VCPU_CR(r4)		/* r3 = vcpu->arch.cr */
-	stw	r3, (PACA_EXMC + EX_CCR)(r13)
+kvm_start_lightweight:
+
+	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
+	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
 
+	/* Load some guest state in the respective registers */
 	ld	r3, VCPU_CTR(r4)	/* r3 = vcpu->arch.ctr */
 	mtctr	r3			/* CTR = r3 */
 
 	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
 	mtlr	r3			/* LR = r3 */
 
-	ld	r3, VCPU_XER(r4)	/* r3 = vcpu->arch.xer */
-	std	r3, (PACA_EXMC + EX_R3)(r13)
+	DISABLE_INTERRUPTS
 
 	/* Some guests may need to have dcbz set to 32 byte length.
 	 *
@@ -142,34 +130,21 @@ kvm_start_lightweight:
 	mtspr	SPRN_HID5,r3
 
 no_dcbz32_on:
 
-	/* Load guest GPRs */
-
-	ld	r3, VCPU_GPR(r9)(r4)
-	std	r3, (PACA_EXMC + EX_R9)(r13)
-	ld	r3, VCPU_GPR(r10)(r4)
-	std	r3, (PACA_EXMC + EX_R10)(r13)
-	ld	r3, VCPU_GPR(r11)(r4)
-	std	r3, (PACA_EXMC + EX_R11)(r13)
-	ld	r3, VCPU_GPR(r12)(r4)
-	std	r3, (PACA_EXMC + EX_R12)(r13)
-	ld	r3, VCPU_GPR(r13)(r4)
-	std	r3, (PACA_EXMC + EX_R13)(r13)
-
-	ld	r0, VCPU_GPR(r0)(r4)
-	ld	r1, VCPU_GPR(r1)(r4)
-	ld	r2, VCPU_GPR(r2)(r4)
-	ld	r3, VCPU_GPR(r3)(r4)
-	ld	r5, VCPU_GPR(r5)(r4)
-	ld	r6, VCPU_GPR(r6)(r4)
-	ld	r7, VCPU_GPR(r7)(r4)
-	ld	r8, VCPU_GPR(r8)(r4)
-	ld	r4, VCPU_GPR(r4)(r4)
-
 	/* This sets the Magic value for the trampoline */
 
+	/* XXX this needs to move into a safe function, so we can
+	   be sure we don't get any interrupts */
+
 	li	r11, 1
 	stb	r11, PACA_KVM_IN_GUEST(r13)
 
+	ld	r3, PACA_KVM_RMHANDLER(r13)
+	mtsrr0	r3
+
+	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+	mtsrr1	r3
+
 	/* Jump to SLB patching handlder and into our guest */
 	RFI
@@ -185,60 +160,31 @@ kvmppc_handler_highmem:
 	/*
 	 * Register usage at this point:
 	 *
-	 * R00   = guest R13
-	 * R01   = host R1
-	 * R02   = host R2
-	 * R10   = guest PC
-	 * R11   = guest MSR
-	 * R12   = exit handler id
-	 * R13   = PACA
-	 * PACA.exmc.R9    = guest R1
-	 * PACA.exmc.R10   = guest R10
-	 * PACA.exmc.R11   = guest R11
-	 * PACA.exmc.R12   = guest R12
-	 * PACA.exmc.R13   = guest R2
-	 * PACA.exmc.DAR   = guest DAR
-	 * PACA.exmc.DSISR = guest DSISR
-	 * PACA.exmc.LR    = guest instruction
-	 * PACA.exmc.CCR   = guest CR
-	 * PACA.exmc.SRR0  = guest R0
+	 * R0         = guest last inst
+	 * R1         = host R1
+	 * R2         = host R2
+	 * R3         = guest PC
+	 * R4         = guest MSR
+	 * R5         = guest DAR
+	 * R6         = guest DSISR
+	 * R13        = PACA
+	 * PACA.KVM.* = guest *
 	 *
 	 */
 
-	std	r3, (PACA_EXMC+EX_R3)(r13)
-
-	/* save the exit id in R3 */
-	mr	r3, r12
-
-	/* R12 = vcpu */
-	ld	r12, GPR4(r1)
-
-	/* Now save the guest state */
-
-	std	r0, VCPU_GPR(r13)(r12)
-	std	r4, VCPU_GPR(r4)(r12)
-	std	r5, VCPU_GPR(r5)(r12)
-	std	r6, VCPU_GPR(r6)(r12)
-	std	r7, VCPU_GPR(r7)(r12)
-	std	r8, VCPU_GPR(r8)(r12)
-	std	r9, VCPU_GPR(r9)(r12)
-
-	/* get registers from PACA */
-	mfpaca	r5, r0, EX_SRR0, r12
-	mfpaca	r5, r3, EX_R3, r12
-	mfpaca	r5, r1, EX_R9, r12
-	mfpaca	r5, r10, EX_R10, r12
-	mfpaca	r5, r11, EX_R11, r12
-	mfpaca	r5, r12, EX_R12, r12
-	mfpaca	r5, r2, EX_R13, r12
-
-	lwz	r5, (PACA_EXMC+EX_LR)(r13)
-	stw	r5, VCPU_LAST_INST(r12)
-
-	lwz	r5, (PACA_EXMC+EX_CCR)(r13)
-	stw	r5, VCPU_CR(r12)
-
-	ld	r5, VCPU_HFLAGS(r12)
+	/* R7 = vcpu */
+	ld	r7, GPR4(r1)
+
+	/* Now save the guest state */
+
+	stw	r0, VCPU_LAST_INST(r7)
+
+	std	r3, VCPU_PC(r7)
+	std	r4, VCPU_SHADOW_MSR(r7)
+	std	r5, VCPU_FAULT_DEAR(r7)
+	std	r6, VCPU_FAULT_DSISR(r7)
+
+	ld	r5, VCPU_HFLAGS(r7)
 	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
 	beq	no_dcbz32_off
@@ -248,58 +194,42 @@ kvmppc_handler_highmem:
 
 no_dcbz32_off:
 
-	std	r14, VCPU_GPR(r14)(r12)
-	std	r15, VCPU_GPR(r15)(r12)
-	std	r16, VCPU_GPR(r16)(r12)
-	std	r17, VCPU_GPR(r17)(r12)
-	std	r18, VCPU_GPR(r18)(r12)
-	std	r19, VCPU_GPR(r19)(r12)
-	std	r20, VCPU_GPR(r20)(r12)
-	std	r21, VCPU_GPR(r21)(r12)
-	std	r22, VCPU_GPR(r22)(r12)
-	std	r23, VCPU_GPR(r23)(r12)
-	std	r24, VCPU_GPR(r24)(r12)
-	std	r25, VCPU_GPR(r25)(r12)
-	std	r26, VCPU_GPR(r26)(r12)
-	std	r27, VCPU_GPR(r27)(r12)
-	std	r28, VCPU_GPR(r28)(r12)
-	std	r29, VCPU_GPR(r29)(r12)
-	std	r30, VCPU_GPR(r30)(r12)
-	std	r31, VCPU_GPR(r31)(r12)
-
-	/* Save guest PC (R10) */
-	std	r10, VCPU_PC(r12)
-
-	/* Save guest msr (R11) */
-	std	r11, VCPU_SHADOW_MSR(r12)
-
-	/* Save guest CTR (in R12) */
+	std	r14, VCPU_GPR(r14)(r7)
+	std	r15, VCPU_GPR(r15)(r7)
+	std	r16, VCPU_GPR(r16)(r7)
+	std	r17, VCPU_GPR(r17)(r7)
+	std	r18, VCPU_GPR(r18)(r7)
+	std	r19, VCPU_GPR(r19)(r7)
+	std	r20, VCPU_GPR(r20)(r7)
+	std	r21, VCPU_GPR(r21)(r7)
+	std	r22, VCPU_GPR(r22)(r7)
+	std	r23, VCPU_GPR(r23)(r7)
+	std	r24, VCPU_GPR(r24)(r7)
+	std	r25, VCPU_GPR(r25)(r7)
+	std	r26, VCPU_GPR(r26)(r7)
+	std	r27, VCPU_GPR(r27)(r7)
+	std	r28, VCPU_GPR(r28)(r7)
+	std	r29, VCPU_GPR(r29)(r7)
+	std	r30, VCPU_GPR(r30)(r7)
+	std	r31, VCPU_GPR(r31)(r7)
+
+	/* Save guest CTR */
 	mfctr	r5
-	std	r5, VCPU_CTR(r12)
+	std	r5, VCPU_CTR(r7)
 
 	/* Save guest LR */
 	mflr	r5
-	std	r5, VCPU_LR(r12)
-
-	/* Save guest XER */
-	mfxer	r5
-	std	r5, VCPU_XER(r12)
+	std	r5, VCPU_LR(r7)
 
-	/* Save guest DAR */
-	ld	r5, (PACA_EXMC+EX_DAR)(r13)
-	std	r5, VCPU_FAULT_DEAR(r12)
-
-	/* Save guest DSISR */
-	lwz	r5, (PACA_EXMC+EX_DSISR)(r13)
-	std	r5, VCPU_FAULT_DSISR(r12)
+	/* XXX convert to safe function call */
 
 	/* Restore host msr -> SRR1 */
-	ld	r7, VCPU_HOST_MSR(r12)
-	mtsrr1	r7
+	ld	r6, VCPU_HOST_MSR(r7)
+	mtsrr1	r6
 
 	/* Restore host IP -> SRR0 */
-	ld	r6, VCPU_HOST_RETIP(r12)
-	mtsrr0	r6
+	ld	r5, VCPU_HOST_RETIP(r7)
+	mtsrr0	r5
 
 	/*
 	 * For some interrupts, we need to call the real Linux
@@ -311,9 +241,9 @@ no_dcbz32_off:
 	 * r3 = address of interrupt handler (exit reason)
 	 */
 
-	cmpwi	r3, BOOK3S_INTERRUPT_EXTERNAL
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
 	beq	call_linux_handler
-	cmpwi	r3, BOOK3S_INTERRUPT_DECREMENTER
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
 	beq	call_linux_handler
 
 	/* Back to Interruptable Mode! (goto kvm_return_point) */
@@ -334,12 +264,12 @@ call_linux_handler:
 	 * R7 VCPU_HOST_MSR
 	 */
 
-	mtlr	r3
+	mtlr	r12
 
-	ld	r5, VCPU_TRAMPOLINE_LOWMEM(r12)
-	mtsrr0	r5
-	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r5
+	ld	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
+	mtsrr0	r4
+	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+	mtsrr1	r3
 
 	RFI
@@ -350,7 +280,7 @@ kvm_return_point:
 
 	/* go back into the guest */
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r3
+	mr	r5, r12
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
...
@@ -45,37 +45,21 @@ kvmppc_trampoline_\intno:
 	 * To distinguish, we check a magic byte in the PACA
 	 */
 	mfspr	r13, SPRN_SPRG_PACA		/* r13 = PACA */
-	std	r12, (PACA_EXMC + EX_R12)(r13)
+	std	r12, PACA_KVM_SCRATCH0(r13)
 	mfcr	r12
-	stw	r12, (PACA_EXMC + EX_CCR)(r13)
+	stw	r12, PACA_KVM_SCRATCH1(r13)
 	lbz	r12, PACA_KVM_IN_GUEST(r13)
 	cmpwi	r12, 0
 	bne	..kvmppc_handler_hasmagic_\intno
 	/* No KVM guest? Then jump back to the Linux handler! */
-	lwz	r12, (PACA_EXMC + EX_CCR)(r13)
+	lwz	r12, PACA_KVM_SCRATCH1(r13)
 	mtcr	r12
-	ld	r12, (PACA_EXMC + EX_R12)(r13)
+	ld	r12, PACA_KVM_SCRATCH0(r13)
 	mfspr	r13, SPRN_SPRG_SCRATCH0		/* r13 = original r13 */
 	b	kvmppc_resume_\intno		/* Get back original handler */
 
 	/* Now we know we're handling a KVM guest */
 ..kvmppc_handler_hasmagic_\intno:
-	/* Unset guest state */
-	li	r12, 0
-	stb	r12, PACA_KVM_IN_GUEST(r13)
-
-	std	r1, (PACA_EXMC+EX_R9)(r13)
-	std	r10, (PACA_EXMC+EX_R10)(r13)
-	std	r11, (PACA_EXMC+EX_R11)(r13)
-	std	r2, (PACA_EXMC+EX_R13)(r13)
-
-	mfsrr0	r10
-	mfsrr1	r11
-
-	/* Restore R1/R2 so we can handle faults */
-	ld	r1, PACAR1(r13)
-	ld	r2, (PACA_EXMC+EX_SRR0)(r13)
-
 	/* Let's store which interrupt we're handling */
 	li	r12, \intno
@@ -106,16 +90,16 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
 	 *
 	 * Input Registers:
 	 *
-	 * R6 = SRR0
-	 * R7 = SRR1
+	 * R5 = SRR0
+	 * R6 = SRR1
 	 * LR = real-mode IP
 	 *
 	 */
 .global kvmppc_handler_lowmem_trampoline
 kvmppc_handler_lowmem_trampoline:
 
-	mtsrr0	r6
-	mtsrr1	r7
+	mtsrr0	r5
+	mtsrr1	r6
 	blr
 kvmppc_handler_lowmem_trampoline_end:
...
@@ -51,24 +51,18 @@ kvmppc_handler_trampoline_enter:
 	 *
 	 * MSR = ~IR|DR
 	 * R13 = PACA
+	 * R1 = host R1
+	 * R2 = host R2
 	 * R9 = guest IP
 	 * R10 = guest MSR
-	 * R11 = free
-	 * R12 = free
-	 * PACA[PACA_EXMC + EX_R9] = guest R9
-	 * PACA[PACA_EXMC + EX_R10] = guest R10
-	 * PACA[PACA_EXMC + EX_R11] = guest R11
-	 * PACA[PACA_EXMC + EX_R12] = guest R12
-	 * PACA[PACA_EXMC + EX_R13] = guest R13
-	 * PACA[PACA_EXMC + EX_CCR] = guest CR
-	 * PACA[PACA_EXMC + EX_R3] = guest XER
+	 * all other GPRS = free
+	 * PACA[KVM_CR] = guest CR
+	 * PACA[KVM_XER] = guest XER
 	 */
 
 	mtsrr0	r9
 	mtsrr1	r10
 
-	mtspr	SPRN_SPRG_SCRATCH0, r0
-
 	/* Remove LPAR shadow entries */
 
 #if SLB_NUM_BOLTED == 3
@@ -131,20 +125,27 @@ slb_do_enter:
 
 	/* Enter guest */
 
-	mfspr	r0, SPRN_SPRG_SCRATCH0
-
-	ld	r9, (PACA_EXMC+EX_R9)(r13)
-	ld	r10, (PACA_EXMC+EX_R10)(r13)
-	ld	r12, (PACA_EXMC+EX_R12)(r13)
-
-	lwz	r11, (PACA_EXMC+EX_CCR)(r13)
+	ld	r0, (PACA_KVM_R0)(r13)
+	ld	r1, (PACA_KVM_R1)(r13)
+	ld	r2, (PACA_KVM_R2)(r13)
+	ld	r3, (PACA_KVM_R3)(r13)
+	ld	r4, (PACA_KVM_R4)(r13)
+	ld	r5, (PACA_KVM_R5)(r13)
+	ld	r6, (PACA_KVM_R6)(r13)
+	ld	r7, (PACA_KVM_R7)(r13)
+	ld	r8, (PACA_KVM_R8)(r13)
+	ld	r9, (PACA_KVM_R9)(r13)
+	ld	r10, (PACA_KVM_R10)(r13)
+	ld	r12, (PACA_KVM_R12)(r13)
+
+	lwz	r11, (PACA_KVM_CR)(r13)
 	mtcr	r11
 
-	ld	r11, (PACA_EXMC+EX_R3)(r13)
+	ld	r11, (PACA_KVM_XER)(r13)
 	mtxer	r11
 
-	ld	r11, (PACA_EXMC+EX_R11)(r13)
-	ld	r13, (PACA_EXMC+EX_R13)(r13)
+	ld	r11, (PACA_KVM_R11)(r13)
+	ld	r13, (PACA_KVM_R13)(r13)
 
 	RFI
 kvmppc_handler_trampoline_enter_end:
@@ -162,28 +163,58 @@ kvmppc_handler_trampoline_exit:
 
 	/* Register usage at this point:
 	 *
-	 * SPRG_SCRATCH0 = guest R13
-	 * R01           = host R1
-	 * R02           = host R2
-	 * R10           = guest PC
-	 * R11           = guest MSR
-	 * R12           = exit handler id
-	 * R13           = PACA
-	 * PACA.exmc.CCR = guest CR
-	 * PACA.exmc.R9  = guest R1
-	 * PACA.exmc.R10 = guest R10
-	 * PACA.exmc.R11 = guest R11
-	 * PACA.exmc.R12 = guest R12
-	 * PACA.exmc.R13 = guest R2
+	 * SPRG_SCRATCH0     = guest R13
+	 * R12               = exit handler id
+	 * R13               = PACA
+	 * PACA.KVM.SCRATCH0 = guest R12
+	 * PACA.KVM.SCRATCH1 = guest CR
 	 *
 	 */
 
 	/* Save registers */
 
-	std	r0, (PACA_EXMC+EX_SRR0)(r13)
-	std	r9, (PACA_EXMC+EX_R3)(r13)
-	std	r10, (PACA_EXMC+EX_LR)(r13)
-	std	r11, (PACA_EXMC+EX_DAR)(r13)
+	std	r0, PACA_KVM_R0(r13)
+	std	r1, PACA_KVM_R1(r13)
+	std	r2, PACA_KVM_R2(r13)
+	std	r3, PACA_KVM_R3(r13)
+	std	r4, PACA_KVM_R4(r13)
+	std	r5, PACA_KVM_R5(r13)
+	std	r6, PACA_KVM_R6(r13)
+	std	r7, PACA_KVM_R7(r13)
+	std	r8, PACA_KVM_R8(r13)
+	std	r9, PACA_KVM_R9(r13)
+	std	r10, PACA_KVM_R10(r13)
+	std	r11, PACA_KVM_R11(r13)
+
+	/* Restore R1/R2 so we can handle faults */
+	ld	r1, PACA_KVM_HOST_R1(r13)
+	ld	r2, PACA_KVM_HOST_R2(r13)
+
+	/* Save guest PC and MSR in GPRs */
+	mfsrr0	r3
+	mfsrr1	r4
+
+	/* Get scratch'ed off registers */
+	mfspr	r9, SPRN_SPRG_SCRATCH0
+	std	r9, PACA_KVM_R13(r13)
+
+	ld	r8, PACA_KVM_SCRATCH0(r13)
+	std	r8, PACA_KVM_R12(r13)
+
+	lwz	r7, PACA_KVM_SCRATCH1(r13)
+	stw	r7, PACA_KVM_CR(r13)
+
+	/* Save more register state */
+
+	mfxer	r6
+	stw	r6, PACA_KVM_XER(r13)
+
+	mfdar	r5
+	mfdsisr	r6
+
+	/* Unset guest state */
+	li	r9, 0
+	stb	r9, PACA_KVM_IN_GUEST(r13)
 
 	/*
 	 * In order for us to easily get the last instruction,
@@ -207,7 +238,8 @@ ld_last_inst:
 	ori	r11, r9, MSR_DR			/* Enable paging for data */
 	mtmsr	r11
 	/* 2) fetch the instruction */
-	lwz	r0, 0(r10)
+	/* XXX implement PACA_KVM_IN_GUEST=2 path to safely jump over this */
+	lwz	r0, 0(r3)
 	/* 3) disable paging again */
 	mtmsr	r9
@@ -233,29 +265,27 @@ no_ld_last_inst:
 
 slb_do_exit:
 
-	/* Restore registers */
-
-	ld	r11, (PACA_EXMC+EX_DAR)(r13)
-	ld	r10, (PACA_EXMC+EX_LR)(r13)
-	ld	r9, (PACA_EXMC+EX_R3)(r13)
-
-	/* Save last inst */
-	stw	r0, (PACA_EXMC+EX_LR)(r13)
-
-	/* Save DAR and DSISR before going to paged mode */
-	mfdar	r0
-	std	r0, (PACA_EXMC+EX_DAR)(r13)
-	mfdsisr	r0
-	stw	r0, (PACA_EXMC+EX_DSISR)(r13)
+	/* Register usage at this point:
+	 *
+	 * R0         = guest last inst
+	 * R1         = host R1
+	 * R2         = host R2
+	 * R3         = guest PC
+	 * R4         = guest MSR
+	 * R5         = guest DAR
+	 * R6         = guest DSISR
+	 * R12        = exit handler id
+	 * R13        = PACA
+	 * PACA.KVM.* = guest *
+	 *
+	 */
 
 	/* RFI into the highmem handler */
-	mfmsr	r0
-	ori	r0, r0, MSR_IR|MSR_DR|MSR_RI	/* Enable paging */
-	mtsrr1	r0
-	ld	r0, PACASAVEDMSR(r13)		/* Highmem handler address */
-	mtsrr0	r0
-
-	mfspr	r0, SPRN_SPRG_SCRATCH0
+	mfmsr	r7
+	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI	/* Enable paging */
+	mtsrr1	r7
+	ld	r8, PACA_KVM_VMHANDLER(r13)	/* Highmem handler address */
+	mtsrr0	r8
 
 	RFI
 kvmppc_handler_trampoline_exit_end:
...