Commit caa1faa7 authored by James Hogan, committed by Ralf Baechle

MIPS: KVM: Trivial whitespace and style fixes

A bunch of misc whitespace and style fixes within arch/mips/kvm/.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: kvm@vger.kernel.org
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/11883/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 097d5638
...@@ -2027,7 +2027,8 @@ config KVM_GUEST ...@@ -2027,7 +2027,8 @@ config KVM_GUEST
bool "KVM Guest Kernel" bool "KVM Guest Kernel"
depends on BROKEN_ON_SMP depends on BROKEN_ON_SMP
help help
Select this option if building a guest kernel for KVM (Trap & Emulate) mode Select this option if building a guest kernel for KVM (Trap & Emulate)
mode.
config KVM_GUEST_TIMER_FREQ config KVM_GUEST_TIMER_FREQ
int "Count/Compare Timer Frequency (MHz)" int "Count/Compare Timer Frequency (MHz)"
......
...@@ -58,7 +58,7 @@ ...@@ -58,7 +58,7 @@
#define KVM_MAX_VCPUS 1 #define KVM_MAX_VCPUS 1
#define KVM_USER_MEM_SLOTS 8 #define KVM_USER_MEM_SLOTS 8
/* memory slots that does not exposed to userspace */ /* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0 #define KVM_PRIVATE_MEM_SLOTS 0
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_HALT_POLL_NS_DEFAULT 500000
......
...@@ -1243,10 +1243,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, ...@@ -1243,10 +1243,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[MIPS_CP0_STATUS][0]++; cop0->stat[MIPS_CP0_STATUS][0]++;
#endif #endif
if (rt != 0) { if (rt != 0)
vcpu->arch.gprs[rt] = vcpu->arch.gprs[rt] =
kvm_read_c0_guest_status(cop0); kvm_read_c0_guest_status(cop0);
}
/* EI */ /* EI */
if (inst & 0x20) { if (inst & 0x20) {
kvm_debug("[%#lx] mfmcz_op: EI\n", kvm_debug("[%#lx] mfmcz_op: EI\n",
...@@ -2583,9 +2582,8 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, ...@@ -2583,9 +2582,8 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
* an entry into the guest TLB. * an entry into the guest TLB.
*/ */
index = kvm_mips_guest_tlb_lookup(vcpu, index = kvm_mips_guest_tlb_lookup(vcpu,
(va & VPN2_MASK) | (va & VPN2_MASK) |
(kvm_read_c0_guest_entryhi (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
(vcpu->arch.cop0) & ASID_MASK));
if (index < 0) { if (index < 0) {
if (exccode == T_TLB_LD_MISS) { if (exccode == T_TLB_LD_MISS) {
er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
......
...@@ -335,7 +335,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) ...@@ -335,7 +335,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
/* Now restore the host state just enough to run the handlers */ /* Now restore the host state just enough to run the handlers */
/* Swtich EBASE to the one used by Linux */ /* Switch EBASE to the one used by Linux */
/* load up the host EBASE */ /* load up the host EBASE */
mfc0 v0, CP0_STATUS mfc0 v0, CP0_STATUS
...@@ -490,11 +490,11 @@ __kvm_mips_return_to_guest: ...@@ -490,11 +490,11 @@ __kvm_mips_return_to_guest:
REG_ADDU t3, t1, t2 REG_ADDU t3, t1, t2
LONG_L k0, (t3) LONG_L k0, (t3)
andi k0, k0, 0xff andi k0, k0, 0xff
mtc0 k0,CP0_ENTRYHI mtc0 k0, CP0_ENTRYHI
ehb ehb
/* Disable RDHWR access */ /* Disable RDHWR access */
mtc0 zero, CP0_HWRENA mtc0 zero, CP0_HWRENA
/* load the guest context from VCPU and return */ /* load the guest context from VCPU and return */
LONG_L $0, VCPU_R0(k1) LONG_L $0, VCPU_R0(k1)
...@@ -606,11 +606,11 @@ __kvm_mips_return_to_host: ...@@ -606,11 +606,11 @@ __kvm_mips_return_to_host:
/* Restore RDHWR access */ /* Restore RDHWR access */
PTR_LI k0, 0x2000000F PTR_LI k0, 0x2000000F
mtc0 k0, CP0_HWRENA mtc0 k0, CP0_HWRENA
/* Restore RA, which is the address we will return to */ /* Restore RA, which is the address we will return to */
LONG_L ra, PT_R31(k1) LONG_L ra, PT_R31(k1)
j ra j ra
nop nop
VECTOR_END(MIPSX(GuestExceptionEnd)) VECTOR_END(MIPSX(GuestExceptionEnd))
......
...@@ -673,8 +673,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) ...@@ -673,8 +673,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
local_irq_save(flags); local_irq_save(flags);
if (((vcpu->arch. if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) { ASID_VERSION_MASK) {
kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu); kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
vcpu->arch.guest_kernel_asid[cpu] = vcpu->arch.guest_kernel_asid[cpu] =
vcpu->arch.guest_kernel_mm.context.asid[cpu]; vcpu->arch.guest_kernel_mm.context.asid[cpu];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment