Commit 2c07ebbd authored by David Daney, committed by Gleb Natapov

mips/kvm: Improve code formatting in arch/mips/kvm/kvm_locore.S

No code changes, just reflowing some comments and consistently using
tabs and spaces.  Object code is verified to be unchanged.
Signed-off-by: David Daney <david.daney@cavium.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent cc2df20c
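
The commit message's claim that the object code is unchanged can be checked by disassembling kvm_locore.o from a build of each tree and comparing the output. Below is a minimal sketch in Python of one way to do that; the cross-binutils name mips-linux-gnu-objdump and the .before/.after file names are illustrative assumptions, not part of this commit.

#!/usr/bin/env python3
"""Sketch: confirm a whitespace-only patch leaves object code unchanged.

Assumes the object file has been built before and after the patch and
copied aside, and that a MIPS-capable objdump is on PATH.
"""
import subprocess
import sys

OBJDUMP = "mips-linux-gnu-objdump"  # assumed cross-binutils name


def disassembly(path):
    """Return objdump -d output with the file-name header line dropped,
    so two differently named copies of the same code compare equal."""
    out = subprocess.run([OBJDUMP, "-d", path], check=True,
                         capture_output=True, text=True).stdout
    return [line for line in out.splitlines() if "file format" not in line]


def main():
    before, after = sys.argv[1], sys.argv[2]  # e.g. kvm_locore.o.before / .after
    if disassembly(before) == disassembly(after):
        print("object code identical")
        return 0
    print("object code differs")
    return 1


if __name__ == "__main__":
    sys.exit(main())

Comparing the stripped disassembly rather than the raw .o bytes can avoid false mismatches: reflowing comments shifts source line numbers, which may change debug-line information even when the generated machine code is identical.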
arch/mips/kvm/kvm_locore.S

 /*
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Main entry point for the guest, exception handling.
  *
  * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  * Authors: Sanjay Lal <sanjayl@kymasys.com>
  */
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
@@ -62,7 +62,7 @@ FEXPORT(__kvm_mips_vcpu_run)
 	.set	noat
 	/* k0/k1 not being used in host kernel context */
-	addiu	k1,sp, -PT_SIZE
+	addiu	k1, sp, -PT_SIZE
 	LONG_S	$0, PT_R0(k1)
 	LONG_S	$1, PT_R1(k1)
 	LONG_S	$2, PT_R2(k1)
@@ -111,7 +111,7 @@ FEXPORT(__kvm_mips_vcpu_run)
 	LONG_S	v0, PT_STATUS(k1)
 	/* Save host ASID, shove it into the BVADDR location */
-	mfc0	v1,CP0_ENTRYHI
+	mfc0	v1, CP0_ENTRYHI
 	andi	v1, 0xff
 	LONG_S	v1, PT_HOST_ASID(k1)
@@ -120,33 +120,38 @@ FEXPORT(__kvm_mips_vcpu_run)
 	LONG_S	v1, PT_HOST_USERLOCAL(k1)
 	/* DDATA_LO has pointer to vcpu */
-	mtc0	a1,CP0_DDATA_LO
+	mtc0	a1, CP0_DDATA_LO
 	/* Offset into vcpu->arch */
 	addiu	k1, a1, VCPU_HOST_ARCH
-	/* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
+	/*
+	 * Save the host stack to VCPU, used for exception processing
+	 * when we exit from the Guest
+	 */
 	LONG_S	sp, VCPU_HOST_STACK(k1)
 	/* Save the kernel gp as well */
 	LONG_S	gp, VCPU_HOST_GP(k1)
 	/* Setup status register for running the guest in UM, interrupts are disabled */
-	li	k0,(ST0_EXL | KSU_USER| ST0_BEV)
-	mtc0	k0,CP0_STATUS
+	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
+	mtc0	k0, CP0_STATUS
 	ehb
 	/* load up the new EBASE */
 	LONG_L	k0, VCPU_GUEST_EBASE(k1)
-	mtc0	k0,CP0_EBASE
-	/* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
-	 * but make sure that timer interrupts are enabled
+	mtc0	k0, CP0_EBASE
+	/*
+	 * Now that the new EBASE has been loaded, unset BEV, set
+	 * interrupt mask as it was but make sure that timer interrupts
+	 * are enabled
 	 */
-	li	k0,(ST0_EXL | KSU_USER | ST0_IE)
+	li	k0, (ST0_EXL | KSU_USER | ST0_IE)
 	andi	v0, v0, ST0_IM
 	or	k0, k0, v0
-	mtc0	k0,CP0_STATUS
+	mtc0	k0, CP0_STATUS
 	ehb
@@ -168,7 +173,7 @@ FEXPORT(__kvm_mips_load_asid)
 	addu	t3, t1, t2
 	LONG_L	k0, (t3)
 	andi	k0, k0, 0xff
-	mtc0	k0,CP0_ENTRYHI
+	mtc0	k0, CP0_ENTRYHI
 	ehb
 	/* Disable RDHWR access */
@@ -279,7 +284,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	LONG_S	$14, VCPU_R14(k1)
 	LONG_S	$15, VCPU_R15(k1)
 	LONG_S	$16, VCPU_R16(k1)
-	LONG_S	$17,VCPU_R17(k1)
+	LONG_S	$17, VCPU_R17(k1)
 	LONG_S	$18, VCPU_R18(k1)
 	LONG_S	$19, VCPU_R19(k1)
 	LONG_S	$20, VCPU_R20(k1)
@@ -327,8 +332,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* Save pointer to run in s0, will be saved by the compiler */
 	move	s0, a0
-	/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
+	/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+	 * process the exception */
 	mfc0	k0,CP0_EPC
 	LONG_S	k0, VCPU_PC(k1)
@@ -373,7 +378,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	LONG_L	sp, VCPU_HOST_STACK(k1)
 	/* Saved host state */
-	addiu	sp,sp, -PT_SIZE
+	addiu	sp, sp, -PT_SIZE
 	/* XXXKYMA do we need to load the host ASID, maybe not because the
 	 * kernel entries are marked GLOBAL, need to verify
@@ -389,24 +394,27 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* Jump to handler */
 FEXPORT(__kvm_mips_jump_to_handler)
-	/* XXXKYMA: not sure if this is safe, how large is the stack?? */
-	/* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
-	la	t9,kvm_mips_handle_exit
+	/* XXXKYMA: not sure if this is safe, how large is the stack??
+	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
+	 * with this in the kernel */
+	la	t9, kvm_mips_handle_exit
 	jalr.hb	t9
-	addiu	sp,sp, -CALLFRAME_SIZ	/* BD Slot */
+	addiu	sp, sp, -CALLFRAME_SIZ	/* BD Slot */
 	/* Return from handler Make sure interrupts are disabled */
 	di
 	ehb
-	/* XXXKYMA: k0/k1 could have been blown away if we processed an exception
-	 * while we were handling the exception from the guest, reload k1
+	/* XXXKYMA: k0/k1 could have been blown away if we processed
+	 * an exception while we were handling the exception from the
+	 * guest, reload k1
 	 */
 	move	k1, s1
 	addiu	k1, k1, VCPU_HOST_ARCH
-	/* Check return value, should tell us if we are returning to the host (handle I/O etc)
-	 * or resuming the guest
+	/* Check return value, should tell us if we are returning to the
+	 * host (handle I/O etc)or resuming the guest
 	 */
 	andi	t0, v0, RESUME_HOST
 	bnez	t0, __kvm_mips_return_to_host
@@ -426,7 +434,7 @@ __kvm_mips_return_to_guest:
 	.set	noat
 	mtc0	k0, CP0_STATUS
 	ehb
-	mtc0	t0,CP0_EBASE
+	mtc0	t0, CP0_EBASE
 	/* Setup status register for running guest in UM */
 	.set	at
@@ -436,7 +444,6 @@ __kvm_mips_return_to_guest:
 	mtc0	v1, CP0_STATUS
 	ehb
 	/* Set Guest EPC */
 	LONG_L	t0, VCPU_PC(k1)
 	mtc0	t0, CP0_EPC
@@ -525,7 +532,8 @@ __kvm_mips_return_to_host:
 	LONG_L	$0, PT_R0(k1)
 	LONG_L	$1, PT_R1(k1)
-	/* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */
+	/* r2/v0 is the return code, shift it down by 2 (arithmetic)
+	 * to recover the err code */
 	sra	k0, v0, 2
 	move	$2, k0
@@ -635,7 +643,6 @@ LEAF(MIPSX(SyncICache))
 	rdhwr	v0, HW_SYNCI_Step
 	beq	v0, zero, 20f
 	nop
 10:
 	synci	0(a0)
 	addu	a0, a0, v0