Commit 8fe07367 authored by Xiantao Zhang, committed by Avi Kivity

KVM: ia64: Clean up vmm_ivt.S using tab to indent every line

Use tabs for indentation in vmm_ivt.S.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 9f7d5bb5
/*
-* /ia64/kvm_ivt.S
+* arch/ia64/kvm/vmm_ivt.S
*
* Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
@@ -82,7 +82,7 @@
mov r29=cr.ipsr; \
;; \
tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
-(p7)br.sptk.many kvm_dispatch_reflection; \
+(p7) br.sptk.many kvm_dispatch_reflection; \
br.sptk.many kvm_vmm_panic; \
GLOBAL_ENTRY(kvm_vmm_panic)
@@ -115,7 +115,6 @@ ENTRY(kvm_vhpt_miss)
KVM_FAULT(0)
END(kvm_vhpt_miss)
.org kvm_ia64_ivt+0x400
////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
@@ -124,7 +123,7 @@ ENTRY(kvm_itlb_miss)
mov r29=cr.ipsr;
;;
tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6) br.sptk kvm_alt_itlb_miss
+(p6) br.sptk kvm_alt_itlb_miss
mov r19 = 1
br.sptk kvm_itlb_miss_dispatch
KVM_FAULT(1);
@@ -138,7 +137,7 @@ ENTRY(kvm_dtlb_miss)
mov r29=cr.ipsr;
;;
tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6)br.sptk kvm_alt_dtlb_miss
+(p6) br.sptk kvm_alt_dtlb_miss
br.sptk kvm_dtlb_miss_dispatch
END(kvm_dtlb_miss)
@@ -240,7 +239,7 @@ ENTRY(kvm_break_fault)
;;
KVM_SAVE_MIN_WITH_COVER_R19
;;
-alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
+alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!)
mov out0=cr.ifa
mov out2=cr.isr // FIXME: pity to make this slow access twice
mov out3=cr.iim // FIXME: pity to make this slow access twice
@@ -428,9 +427,9 @@ ENTRY(kvm_virtual_exirq)
kvm_dispatch_vexirq:
cmp.eq p6,p0 = 1,r30
;;
-(p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
+(p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
;;
-(p6)ld8 r1 = [r29]
+(p6) ld8 r1 = [r29]
;;
KVM_SAVE_MIN_WITH_COVER_R19
alloc r14=ar.pfs,0,0,1,0
@@ -456,13 +455,11 @@ END(kvm_virtual_exirq)
KVM_FAULT(14)
// this code segment is from 2.6.16.13
.org kvm_ia64_ivt+0x3c00
///////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
KVM_FAULT(15)
.org kvm_ia64_ivt+0x4000
///////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -619,13 +616,13 @@ ENTRY(kvm_virtualization_fault)
cmp.eq p10,p0=EVENT_SSM,r24
cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
cmp.eq p12,p0=EVENT_THASH,r24
-(p6) br.dptk.many kvm_asm_mov_from_ar
-(p7) br.dptk.many kvm_asm_mov_from_rr
-(p8) br.dptk.many kvm_asm_mov_to_rr
-(p9) br.dptk.many kvm_asm_rsm
-(p10) br.dptk.many kvm_asm_ssm
-(p11) br.dptk.many kvm_asm_mov_to_psr
-(p12) br.dptk.many kvm_asm_thash
+(p6) br.dptk.many kvm_asm_mov_from_ar
+(p7) br.dptk.many kvm_asm_mov_from_rr
+(p8) br.dptk.many kvm_asm_mov_to_rr
+(p9) br.dptk.many kvm_asm_rsm
+(p10) br.dptk.many kvm_asm_ssm
+(p11) br.dptk.many kvm_asm_mov_to_psr
+(p12) br.dptk.many kvm_asm_thash
;;
kvm_virtualization_fault_back:
adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
@@ -640,7 +637,7 @@ kvm_virtualization_fault_back:
st8 [r17] = r25
;;
cmp.ne p6,p0=EVENT_RFI, r24
-(p6) br.sptk kvm_dispatch_virtualization_fault
+(p6) br.sptk kvm_dispatch_virtualization_fault
;;
adds r18=VMM_VPD_BASE_OFFSET,r21
;;
@@ -651,9 +648,9 @@ kvm_virtualization_fault_back:
ld8 r18=[r18]
;;
tbit.z p6,p0=r18,63
-(p6) br.sptk kvm_dispatch_virtualization_fault
+(p6) br.sptk kvm_dispatch_virtualization_fault
;;
-//if vifs.v=1 desert current register frame
+//if vifs.v=1 desert current register frame
alloc r18=ar.pfs,0,0,0,0
br.sptk kvm_dispatch_virtualization_fault
END(kvm_virtualization_fault)
@@ -856,7 +853,7 @@ ENTRY(kvm_itlb_miss_dispatch)
END(kvm_itlb_miss_dispatch)
ENTRY(kvm_dispatch_reflection)
-/*
+/*
* Input:
* psr.ic: off
* r19: intr type (offset into ivt, see ia64_int.h)
@@ -893,7 +890,7 @@ ENTRY(kvm_dispatch_virtualization_fault)
;;
KVM_SAVE_MIN_WITH_COVER_R19
;;
-alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
+alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!)
mov out0=r13 //vcpu
adds r3=8,r2 // set up second base pointer
;;
@@ -917,7 +914,6 @@ ENTRY(kvm_dispatch_interrupt)
KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
;;
alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
-//mov out0=cr.ivr // pass cr.ivr as first arg
adds r3=8,r2 // set up second base pointer for SAVE_REST
;;
ssm psr.ic
@@ -934,9 +930,6 @@ ENTRY(kvm_dispatch_interrupt)
br.call.sptk.many b6=kvm_ia64_handle_irq
END(kvm_dispatch_interrupt)
GLOBAL_ENTRY(ia64_leave_nested)
rsm psr.i
;;
@@ -1065,13 +1058,10 @@ GLOBAL_ENTRY(ia64_leave_nested)
rfi
END(ia64_leave_nested)
GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
-/*
+/*
* work.need_resched etc. mustn't get changed
*by this CPU before it returns to
;;
* user- or fsys-mode, hence we disable interrupts early on:
*/
adds r2 = PT(R4)+16,r12
@@ -1293,13 +1283,11 @@ GLOBAL_ENTRY(ia64_vmm_entry)
mov r24=r22
mov r25=r18
tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
-(p1) br.cond.sptk.few kvm_vps_resume_normal
-(p2) br.cond.sptk.many kvm_vps_resume_handler
+(p1) br.cond.sptk.few kvm_vps_resume_normal
+(p2) br.cond.sptk.many kvm_vps_resume_handler
;;
END(ia64_vmm_entry)
/*
* extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
* u64 arg3, u64 arg4, u64 arg5,
@@ -1340,7 +1328,7 @@ hostret = r24
mov b6=entry
br.cond.sptk b6 // call the service
2:
-// Architectural sequence for enabling interrupts if necessary
+// Architectural sequence for enabling interrupts if necessary
(p7) ssm psr.ic
;;
(p7) srlz.i