Commit bb839439
Authored Jan 31, 2005 by Russell King, committed by Linus Torvalds on Jan 31, 2005

    [ARM] [4/4] Reformat assembly code to be consistent.

parent 2db1b65b

Showing 1 changed file with 293 additions and 275 deletions:

    arch/arm/kernel/entry-armv.S  (+293, -275)
...
@@ -34,27 +34,27 @@
        .endm

__pabt_invalid: inv_entry abt, BAD_PREFETCH
                b       1f
__dabt_invalid: inv_entry abt, BAD_DATA
                b       1f
__irq_invalid:  inv_entry irq, BAD_IRQ
                b       1f
__und_invalid:  inv_entry und, BAD_UNDEFINSTR

1:      zero_fp
        ldmia   r4, {r5 - r7}                   @ Get XXX pc, cpsr, old_r0
        add     r4, sp, #S_PC
        stmia   r4, {r5 - r7}                   @ Save XXX pc, cpsr, old_r0
        mov     r0, sp
        and     r2, r6, #31                     @ int mode
        b       bad_mode

/*
 * SVC mode handlers
...
@@ -70,122 +70,129 @@ __und_invalid:
        stmia   r5, {r0 - r4}                   @ save sp_SVC, lr_SVC, pc, cpsr, old_ro
        .endm

        .align  5
__dabt_svc:     svc_entry abt
        mrs     r9, cpsr                        @ Enable interrupts if they were
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT              @ previously

        /*
         * This routine must not corrupt r9
         */
#ifdef MULTI_ABORT
        ldr     r4, .LCprocfns                  @ pass r2, r3 to
        mov     lr, pc                          @ processor code
        ldr     pc, [r4]                        @ call processor specific code
#else
        bl      CPU_ABORT_HANDLER
#endif
        msr     cpsr_c, r9
        mov     r2, sp
        bl      do_DataAbort

        disable_irq r0

        ldr     r0, [sp, #S_PSR]
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr

        .align  5
__irq_svc:      svc_entry irq
#ifdef CONFIG_PREEMPT
        get_thread_info r8
        ldr     r9, [r8, #TI_PREEMPT]           @ get preempt count
        add     r7, r9, #1                      @ increment it
        str     r7, [r8, #TI_PREEMPT]
#endif
1:      get_irqnr_and_base r0, r6, r5, lr
        movne   r1, sp
        @
        @ routine called with r0 = irq number, r1 = struct pt_regs *
        @
        adrsvc  ne, lr, 1b
        bne     asm_do_IRQ
#ifdef CONFIG_PREEMPT
        ldr     r0, [r8, #TI_FLAGS]             @ get flags
        tst     r0, #_TIF_NEED_RESCHED
        blne    svc_preempt
preempt_return: ldr     r0, [r8, #TI_PREEMPT]   @ read preempt value
        teq     r0, r7
        str     r9, [r8, #TI_PREEMPT]           @ restore preempt count
        strne   r0, [r0, -r0]                   @ bug()
#endif
        ldr     r0, [sp, #S_PSR]                @ irqs are already disabled
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr

        .ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:    teq     r9, #0                  @ was preempt count = 0
        ldreq   r6, .LCirq_stat
        movne   pc, lr                          @ no
        ldr     r0, [r6, #4]                    @ local_irq_count
        ldr     r1, [r6, #8]                    @ local_bh_count
        adds    r0, r0, r1
        movne   pc, lr
        mov     r7, #PREEMPT_ACTIVE
        str     r7, [r8, #TI_PREEMPT]           @ set PREEMPT_ACTIVE
1:      enable_irq r2                           @ enable IRQs
        bl      schedule
        disable_irq r0                          @ disable IRQs
        ldr     r0, [r8, #TI_FLAGS]             @ get new tasks TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        beq     preempt_return                  @ go again
        b       1b
#endif

        .align  5
__und_svc:      svc_entry und

        ldr     r0, [r2, #-4]                   @ r0 = instruction
        adrsvc  al, r9, 1f                      @ r9 = normal FP return
        bl      call_fpe                        @ lr = undefined instr return

        mov     r0, sp                          @ struct pt_regs *regs
        bl      do_undefinstr

1:      disable_irq r0
        ldr     lr, [sp, #S_PSR]                @ Get SVC cpsr
        msr     spsr_cxsf, lr
        ldmia   sp, {r0 - pc}^                  @ Restore SVC registers

        .align  5
__pabt_svc:     svc_entry abt
        mrs     r9, cpsr                        @ Enable interrupts if they were
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT              @ previously
        msr     cpsr_c, r9
        mov     r0, r2                          @ address (pc)
        mov     r1, sp                          @ regs
        bl      do_PrefetchAbort                @ call abort handler

        disable_irq r0

        ldr     r0, [sp, #S_PSR]
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr

        .align  5
.LCirq:
        .word   __temp_irq
.LCund:
        .word   __temp_und
.LCabt:
        .word   __temp_abt
#ifdef MULTI_ABORT
.LCprocfns:
        .word   processor
#endif
.LCfp:
        .word   fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
        .word   irq_stat
#endif
...
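As a reading of the preempt-count check in __irq_svc above (not text from the commit itself): the count saved on entry is read back after the handler runs, and "strne r0, [r0, -r0]" addresses r0 - r0 = 0, so a mismatch stores through address zero and faults on the spot rather than letting a corrupted preempt count go unnoticed. A rough C analogue, illustrative only:

    /* Rough C analogue of "strne r0, [r0, -r0]  @ bug()": if the preempt
     * count read back is not the value written on entry, store through
     * address zero so the kernel faults here, like an inline BUG().
     */
    static inline void check_preempt_count(unsigned long got, unsigned long want)
    {
            if (got != want)
                    *(volatile unsigned long *)0 = got;     /* deliberate fault */
    }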
@@ -201,76 +208,76 @@ __pabt_svc:
        stmdb   r5, {sp, lr}^
        .endm

        .align  5
__dabt_usr:     usr_entry abt
        alignment_trap r7, r0, __temp_abt
        zero_fp
#ifdef MULTI_ABORT
        ldr     r4, .LCprocfns                  @ pass r2, r3 to
        mov     lr, pc                          @ processor code
        ldr     pc, [r4]                        @ call processor specific code
#else
        bl      CPU_ABORT_HANDLER
#endif
        enable_irq r2                           @ Enable interrupts
        mov     r2, sp
        adrsvc  al, lr, ret_from_exception
        b       do_DataAbort

        .align  5
__irq_usr:      usr_entry irq
        alignment_trap r7, r0, __temp_irq
        zero_fp
#ifdef CONFIG_PREEMPT
        get_thread_info r8
        ldr     r9, [r8, #TI_PREEMPT]           @ get preempt count
        add     r7, r9, #1                      @ increment it
        str     r7, [r8, #TI_PREEMPT]
#endif
1:      get_irqnr_and_base r0, r6, r5, lr
        movne   r1, sp
        adrsvc  ne, lr, 1b
        @
        @ routine called with r0 = irq number, r1 = struct pt_regs *
        @
        bne     asm_do_IRQ
#ifdef CONFIG_PREEMPT
        ldr     r0, [r8, #TI_PREEMPT]
        teq     r0, r7
        str     r9, [r8, #TI_PREEMPT]
        strne   r0, [r0, -r0]
        mov     tsk, r8
#else
        get_thread_info tsk
#endif
        mov     why, #0
        b       ret_to_user

        .ltorg

        .align  5
__und_usr:      usr_entry und
        alignment_trap r7, r0, __temp_und
        zero_fp
        tst     r3, #PSR_T_BIT                  @ Thumb mode ?
        bne     fpundefinstr                    @ ignore FP
        sub     r4, r2, #4
1:      ldrt    r0, [r4]                        @ r0 = instruction
        adrsvc  al, r9, ret_from_exception      @ r9 = normal FP return
        adrsvc  al, lr, fpundefinstr            @ lr = undefined instr return

        /*
         * The out of line fixup for the ldrt above.
         */
        .section .fixup, "ax"
2:      mov     pc, r9
        .previous
        .section __ex_table, "a"
        .long   1b, 2b
        .previous

/*
 * r0 = instruction.
...
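The ".long 1b, 2b" emitted into the __ex_table section above pairs the address of the user-space load at local label 1 (the ldrt that fetches the faulting instruction) with its fixup at local label 2, which simply returns through r9. A minimal sketch of that pairing in C, with illustrative names rather than the kernel's exact definitions:

    #include <stdint.h>
    #include <stddef.h>

    /* Each entry pairs an instruction that may fault on a user access
     * with the address execution should resume at if it does fault.
     * Names here are illustrative, not the kernel's exact definitions.
     */
    struct ex_entry {
            uint32_t insn;          /* address of the faulting instruction (label 1:) */
            uint32_t fixup;         /* where to resume on a fault (label 2:)          */
    };

    /* The abort handler searches the table for the faulting pc and, if an
     * entry matches, restarts at the recorded fixup address.
     */
    static uint32_t find_fixup(const struct ex_entry *tbl, size_t n, uint32_t fault_pc)
    {
            for (size_t i = 0; i < n; i++)
                    if (tbl[i].insn == fault_pc)
                            return tbl[i].fixup;
            return 0;               /* no fixup: the fault is fatal */
    }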
@@ -289,53 +296,54 @@ __und_usr:
 * r10 - this threads thread_info structure.
 */
call_fpe:       tst     r0, #0x08000000         @ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
        and     r8, r0, #0x0f000000             @ mask out op-code bits
        teqne   r8, #0x0f000000                 @ SWI (ARM6/7 bug)?
#endif
        moveq   pc, lr
        get_thread_info r10                     @ get current thread
        and     r8, r0, #0x00000f00             @ mask out CP number
        mov     r7, #1
        add     r6, r10, #TI_USED_CP
        strb    r7, [r6, r8, lsr #8]            @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
        rsbs    r7, r8, #(1 << 8)               @ CP 0 or 1 only
        movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
        bcs     iwmmxt_task_enable
#endif
        enable_irq r7
        add     pc, pc, r8, lsr #6
        mov     r0, r0

        mov     pc, lr                          @ CP#0
        b       do_fpe                          @ CP#1 (FPE)
        b       do_fpe                          @ CP#2 (FPE)
        mov     pc, lr                          @ CP#3
        mov     pc, lr                          @ CP#4
        mov     pc, lr                          @ CP#5
        mov     pc, lr                          @ CP#6
        mov     pc, lr                          @ CP#7
        mov     pc, lr                          @ CP#8
        mov     pc, lr                          @ CP#9
#ifdef CONFIG_VFP
        b       do_vfp                          @ CP#10 (VFP)
        b       do_vfp                          @ CP#11 (VFP)
#else
        mov     pc, lr                          @ CP#10 (VFP)
        mov     pc, lr                          @ CP#11 (VFP)
#endif
        mov     pc, lr                          @ CP#12
        mov     pc, lr                          @ CP#13
        mov     pc, lr                          @ CP#14 (Debug)
        mov     pc, lr                          @ CP#15 (Control)

do_fpe: ldr     r4, .LCfp
        add     r10, r10, #TI_FPSTATE           @ r10 = workspace
        ldr     pc, [r4]                        @ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
...
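The computed branch in call_fpe above works because r8 holds the coprocessor number in bits 8-11 of the instruction, so "r8, lsr #6" is four times the CP number, and reading pc in an ARM-state instruction yields the address of that instruction plus 8, which is exactly the first slot of the table that follows the "mov r0, r0" padding word. A small C sketch of the same address arithmetic, using a hypothetical base address:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch (not kernel code) of how "add pc, pc, r8, lsr #6" selects a
     * jump-table slot.  Assumes the ARM-state rule that reading pc gives
     * the current instruction's address plus 8.
     */
    static uint32_t table_slot(uint32_t add_insn_addr, uint32_t instruction)
    {
            uint32_t r8 = instruction & 0x00000f00; /* CP number in bits 8..11 */
            uint32_t pc = add_insn_addr + 8;        /* pc as read by the add   */
            return pc + (r8 >> 6);                  /* first slot + 4 * CP#    */
    }

    int main(void)
    {
            /* With the add at a hypothetical 0x1000, the padding word sits at
             * 0x1004, so CP#0's slot is 0x1008 and CP#10 (VFP) is 0x1030.
             */
            printf("CP#0  -> %#x\n", (unsigned)table_slot(0x1000, 0x0e000000));
            printf("CP#10 -> %#x\n", (unsigned)table_slot(0x1000, 0x0e000a00));
            return 0;
    }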
@@ -346,32 +354,33 @@ do_fpe: ldr r4, .LCfp
 * lr  = unrecognised FP instruction return address
 */

        .data
ENTRY(fp_enter)
        .word   fpundefinstr
        .text

fpundefinstr:   mov     r0, sp
        adrsvc  al, lr, ret_from_exception
        b       do_undefinstr

        .align  5
__pabt_usr:     usr_entry abt
        alignment_trap r7, r0, __temp_abt
        zero_fp
        enable_irq r0                           @ Enable interrupts
        mov     r0, r2                          @ address (pc)
        mov     r1, sp                          @ regs
        bl      do_PrefetchAbort                @ call abort handler
        /* fall through */

/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
...
@@ -379,31 +388,31 @@ ENTRY(ret_from_exception)
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
        add     ip, r1, #TI_CPU_SAVE
        ldr     r3, [r2, #TI_CPU_DOMAIN]!
        stmia   ip!, {r4 - sl, fp, sp, lr}      @ Store most regs on stack
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
        mra     r4, r5, acc0
        stmia   ip, {r4, r5}
#endif
        mcr     p15, 0, r3, c3, c0, 0           @ Set domain register
#ifdef CONFIG_VFP
        @ Always disable VFP so we can lazily save/restore the old
        @ state. This occurs in the context of the previous thread.
        VFPFMRX r4, FPEXC
        bic     r4, r4, #FPEXC_ENABLE
        VFPFMXR FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
        bl      iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
        add     r4, r2, #40                     @ cpu_context_save->extra
        ldmib   r4, {r4, r5}
        mar     acc0, r4, r5
#endif
        ldmib   r2, {r4 - sl, fp, sp, pc}       @ Load all regs saved previously

        __INIT

/*
 * Vector stubs.
 *
...
*
other
mode
than
FIQ
...
Ok
you
can
switch
to
another
mode
,
but
you
can
't
*
get
out
of
that
mode
without
clobbering
one
register
.
*/
vector_fiq
:
disable_fiq
subs
pc
,
lr
,
#
4
vector_fiq
:
disable_fiq
subs
pc
,
lr
,
#
4
/*=============================================================================
*
Address
exception
handler
...
...
@@ -552,70 +562,78 @@ vector_fiq: disable_fiq
 */

vector_addrexcptn:
        b       vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
        .align  5
.LCvswi:
        .word   vector_swi

.LCsirq:
        .word   __temp_irq
.LCsund:
        .word   __temp_und
.LCsabt:
        .word   __temp_abt

__stubs_end:

        .equ    __real_stubs_start, .LCvectors + 0x200

.LCvectors:     swi     SYS_ERROR0
        b       __real_stubs_start + (vector_und - __stubs_start)
        ldr     pc, __real_stubs_start + (.LCvswi - __stubs_start)
        b       __real_stubs_start + (vector_pabt - __stubs_start)
        b       __real_stubs_start + (vector_dabt - __stubs_start)
        b       __real_stubs_start + (vector_addrexcptn - __stubs_start)
        b       __real_stubs_start + (vector_irq - __stubs_start)
        b       __real_stubs_start + (vector_fiq - __stubs_start)

ENTRY(__trap_init)
        stmfd   sp!, {r4 - r6, lr}

        mov     r0, #0xff000000
        orr     r0, r0, #0x00ff0000             @ high vectors position
        adr     r1, .LCvectors                  @ set up the vectors
        ldmia   r1, {r1, r2, r3, r4, r5, r6, ip, lr}
        stmia   r0, {r1, r2, r3, r4, r5, r6, ip, lr}

        add     r2, r0, #0x200
        adr     r0, __stubs_start               @ copy stubs to 0x200
        adr     r1, __stubs_end
1:      ldr     r3, [r0], #4
        str     r3, [r2], #4
        cmp     r0, r1
        blt     1b
        LOADREGS(fd, sp!, {r4 - r6, pc})

        .data

/*
 * Do not reorder these, and do not insert extra data between...
 */
__temp_irq:
        .word   0                               @ saved lr_irq
        .word   0                               @ saved spsr_irq
        .word   -1                              @ old_r0
__temp_und:
        .word   0                               @ Saved lr_und
        .word   0                               @ Saved spsr_und
        .word   -1                              @ old_r0
__temp_abt:
        .word   0                               @ Saved lr_abt
        .word   0                               @ Saved spsr_abt
        .word   -1                              @ old_r0

        .globl  cr_alignment
        .globl  cr_no_alignment
cr_alignment:
        .space  4
cr_no_alignment:
        .space  4
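__trap_init above writes the eight-word .LCvectors block to the high vectors page at 0xffff0000 (0xff000000 orred with 0x00ff0000) and then copies the stub code a word at a time to offset 0x200 of that page; since __real_stubs_start is defined as .LCvectors + 0x200, the copied vectors and copied stubs keep the same relative layout, so the pc-relative branches in the vectors still land on the right stubs. A plain C rendering of the copy loop, illustrative only, with the addresses passed in as parameters rather than the real mappings:

    #include <stdint.h>

    /* Word-at-a-time copy mirroring the loop in __trap_init:
     *   1: ldr r3, [r0], #4 ; str r3, [r2], #4 ; cmp r0, r1 ; blt 1b
     * Like the assembly, this is a post-tested loop.
     */
    static void copy_words(uint32_t *dst, const uint32_t *src, const uint32_t *src_end)
    {
            do {
                    *dst++ = *src++;
            } while (src < src_end);
    }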