Commit 0d17aea5 authored by James Hogan, committed by Paolo Bonzini

MIPS: KVM: Use 64-bit CP0_EBase when appropriate

Update the KVM entry point to write CP0_EBase as a 64-bit register when
it is 64-bits wide, and to set the WG (write gate) bit if it exists in
order to write bits 63:30 (or 31:30 on MIPS32).

Prior to MIPS64r6 it was UNDEFINED to perform a 64-bit read or write of
a 32-bit COP0 register. Since this is dynamically generated code,
generate the right type of access depending on whether the kernel is
64-bit and cpu_has_ebase_wg.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1d756942
...@@ -152,6 +152,25 @@ static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp, ...@@ -152,6 +152,25 @@ static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
} }
} }
/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p: Code buffer pointer.
 * @reg: Source register (generated code may set WG bit in @reg).
 *
 * Emit instructions that store @reg into the EBase register. When the CPU
 * has a WG (write gate) bit, OR it into @reg first so the upper base bits
 * (63:30, or 31:30 on MIPS32) are also written, and use the natural-width
 * (UASM_i_MTC0) access; otherwise a plain 32-bit mtc0 is emitted, since a
 * 64-bit access to a 32-bit COP0 register is UNDEFINED before MIPS64r6.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (!cpu_has_ebase_wg) {
		/* No write gate: a 32-bit write covers all writable bits */
		uasm_i_mtc0(p, reg, C0_EBASE);
		return;
	}

	/* Set WG so that all the bits get written */
	uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
	UASM_i_MTC0(p, reg, C0_EBASE);
}
/** /**
* kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU. * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
* @addr: Address to start writing code. * @addr: Address to start writing code.
...@@ -216,7 +235,7 @@ void *kvm_mips_build_vcpu_run(void *addr) ...@@ -216,7 +235,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
/* load up the new EBASE */ /* load up the new EBASE */
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1); UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
uasm_i_mtc0(&p, K0, C0_EBASE); build_set_exc_base(&p, K0);
/* /*
* Now that the new EBASE has been loaded, unset BEV, set * Now that the new EBASE has been loaded, unset BEV, set
...@@ -463,7 +482,7 @@ void *kvm_mips_build_exit(void *addr) ...@@ -463,7 +482,7 @@ void *kvm_mips_build_exit(void *addr)
UASM_i_LA_mostly(&p, K0, (long)&ebase); UASM_i_LA_mostly(&p, K0, (long)&ebase);
UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0); UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
uasm_i_mtc0(&p, K0, C0_EBASE); build_set_exc_base(&p, K0);
if (raw_cpu_has_fpu) { if (raw_cpu_has_fpu) {
/* /*
...@@ -620,7 +639,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr) ...@@ -620,7 +639,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
uasm_i_or(&p, K0, V1, AT); uasm_i_or(&p, K0, V1, AT);
uasm_i_mtc0(&p, K0, C0_STATUS); uasm_i_mtc0(&p, K0, C0_STATUS);
uasm_i_ehb(&p); uasm_i_ehb(&p);
uasm_i_mtc0(&p, T0, C0_EBASE); build_set_exc_base(&p, T0);
/* Setup status register for running guest in UM */ /* Setup status register for running guest in UM */
uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE); uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment