Commit 22b28032, authored Oct 28, 2005 by Paul Mackerras
Merge git://oak/home/sfr/kernels/iseries/work/

Parents: 45424376 299f6ce4
Showing 18 changed files with 21 additions and 3064 deletions (+21, -3064)
  arch/powerpc/kernel/Makefile    +4   -4
  arch/powerpc/kernel/setup_64.c  +5   -13
  arch/powerpc/lib/Makefile       +8   -4
  arch/ppc64/Makefile             +2   -0
  arch/ppc64/kernel/Makefile      +1   -1
  arch/ppc64/kernel/entry.S       +0   -845
  arch/ppc64/kernel/misc.S        +0   -563
  arch/ppc64/lib/Makefile         +1   -14
  arch/ppc64/lib/checksum.S       +0   -229
  arch/ppc64/lib/copypage.S       +0   -121
  arch/ppc64/lib/copyuser.S       +0   -576
  arch/ppc64/lib/e2a.c            +0   -108
  arch/ppc64/lib/locks.c          +0   -95
  arch/ppc64/lib/memcpy.S         +0   -172
  arch/ppc64/lib/sstep.c          +0   -141
  arch/ppc64/lib/strcase.c        +0   -31
  arch/ppc64/lib/string.S         +0   -106
  arch/ppc64/lib/usercopy.c       +0   -41
arch/powerpc/kernel/Makefile

@@ -13,7 +13,7 @@ endif
 obj-y                           := semaphore.o cputable.o ptrace.o syscalls.o \
                                    signal_32.o pmc.o
 obj-$(CONFIG_PPC64)             += setup_64.o binfmt_elf32.o sys_ppc32.o \
-                                   ptrace32.o
+                                   ptrace32.o systbl.o
 obj-$(CONFIG_ALTIVEC)           += vecemu.o vector.o
 obj-$(CONFIG_POWER4)            += idle_power4.o
 obj-$(CONFIG_PPC_OF)            += of_device.o
@@ -28,12 +28,11 @@ extra-$(CONFIG_40x) := head_4xx.o
 extra-$(CONFIG_44x)             := head_44x.o
 extra-$(CONFIG_FSL_BOOKE)       := head_fsl_booke.o
 extra-$(CONFIG_8xx)             := head_8xx.o
-extra-$(CONFIG_PPC64)           += entry_64.o
 extra-y                         += vmlinux.lds

 obj-y                           += process.o init_task.o time.o \
-                                   prom.o systbl.o traps.o setup-common.o
-obj-$(CONFIG_PPC32)             += entry_32.o setup_32.o misc_32.o
+                                   prom.o traps.o setup-common.o
+obj-$(CONFIG_PPC32)             += entry_32.o setup_32.o misc_32.o systbl.o
 obj-$(CONFIG_PPC64)             += misc_64.o
 obj-$(CONFIG_PPC_OF)            += prom_init.o
 obj-$(CONFIG_MODULES)           += ppc_ksyms.o
@@ -54,3 +53,4 @@ obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
 endif

 extra-$(CONFIG_PPC_FPU)         += fpu.o
+extra-$(CONFIG_PPC64)           += entry_64.o
arch/powerpc/kernel/setup_64.c

@@ -701,17 +701,6 @@ static void __init emergency_stack_init(void)
 		limit)) + PAGE_SIZE;
 }

-extern unsigned long *sys_call_table;
-extern unsigned long sys_ni_syscall;
-#ifdef CONFIG_PPC_MERGE
-#define SYS_CALL_ENTRY64(i)	sys_call_table[(i) * 2]
-#define SYS_CALL_ENTRY32(i)	sys_call_table[(i) * 2 + 1]
-#else
-extern unsigned long *sys_call_table32;
-#define SYS_CALL_ENTRY64(i)	sys_call_table[(i)]
-#define SYS_CALL_ENTRY32(i)	sys_call_table32[(i)]
-#endif
-
 /*
  * Called from setup_arch to initialize the bitmap of available
  * syscalls in the systemcfg page
@@ -719,14 +708,17 @@ extern unsigned long *sys_call_table32;
 void __init setup_syscall_map(void)
 {
 	unsigned int i, count64 = 0, count32 = 0;
+	extern unsigned long *sys_call_table;
+	extern unsigned long sys_ni_syscall;
+
 	for (i = 0; i < __NR_syscalls; i++) {
-		if (SYS_CALL_ENTRY64(i) != sys_ni_syscall) {
+		if (sys_call_table[i*2] != sys_ni_syscall) {
 			count64++;
 			systemcfg->syscall_map_64[i >> 5] |=
 				0x80000000UL >> (i & 0x1f);
 		}
-		if (SYS_CALL_ENTRY32(i) != sys_ni_syscall) {
+		if (sys_call_table[i*2+1] != sys_ni_syscall) {
 			count32++;
 			systemcfg->syscall_map_32[i >> 5] |=
 				0x80000000UL >> (i & 0x1f);
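Aside: the bitmap arithmetic in setup_syscall_map() packs one bit per syscall number, 32 numbers per word, most-significant bit first. Below is a minimal standalone C sketch of the same indexing, not the kernel code itself; the table size and the names table, ni_syscall and map_64 are stand-ins chosen for illustration, and the merged layout (even slots hold 64-bit entries, odd slots 32-bit ones) is assumed from the hunk above.

#include <stdio.h>

#define NR_SYSCALLS 64                        /* arbitrary size, illustration only */

static unsigned long table[NR_SYSCALLS * 2];  /* even slots: 64-bit entries, odd: 32-bit */
static unsigned long ni_syscall;              /* stand-in for the sys_ni_syscall address */
static unsigned int  map_64[(NR_SYSCALLS + 31) / 32];

int main(void)
{
	unsigned int i, count64 = 0;

	for (i = 0; i < NR_SYSCALLS; i++) {
		if (table[i * 2] != ni_syscall) {
			count64++;
			/* word i >> 5 covers syscall numbers 32*(i>>5) .. 32*(i>>5)+31; */
			/* 0x80000000 >> (i & 0x1f) selects the bit within it, MSB-first */
			map_64[i >> 5] |= 0x80000000UL >> (i & 0x1f);
		}
	}
	printf("%u of %u syscall slots implemented\n", count64, NR_SYSCALLS);
	return 0;
}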
arch/powerpc/lib/Makefile

@@ -2,12 +2,16 @@
 # Makefile for ppc-specific library files..
 #

-obj-y				:= strcase.o string.o
+ifeq ($(CONFIG_PPC_MERGE),y)
+obj-y				:= string.o
+endif
+
+obj-y				+= strcase.o
 obj-$(CONFIG_PPC32)		+= div64.o copy_32.o checksum_32.o
-obj-$(CONFIG_PPC64)		+= copypage_64.o copyuser_64.o memcpy_64.o \
-				   usercopy_64.o sstep.o checksum_64.o mem_64.o
+obj-$(CONFIG_PPC64)		+= checksum_64.o copypage_64.o copyuser_64.o \
+				   memcpy_64.o usercopy_64.o mem_64.o
 obj-$(CONFIG_PPC_ISERIES)	+= e2a.o
 ifeq ($(CONFIG_PPC64),y)
 obj-$(CONFIG_SMP)		+= locks.o
 obj-$(CONFIG_DEBUG_KERNEL)	+= sstep.o
 endif
arch/ppc64/Makefile

@@ -81,12 +81,14 @@ CFLAGS += $(call cc-option,-funit-at-a-time)

 head-y := arch/ppc64/kernel/head.o
 head-y += arch/powerpc/kernel/fpu.o
+head-y += arch/powerpc/kernel/entry_64.o

 libs-y				+= arch/ppc64/lib/
 core-y				+= arch/ppc64/kernel/ arch/powerpc/kernel/
 core-y				+= arch/powerpc/mm/
 core-y				+= arch/powerpc/sysdev/
 core-y				+= arch/powerpc/platforms/
+core-y				+= arch/powerpc/lib/
 core-$(CONFIG_XMON)		+= arch/ppc64/xmon/
 drivers-$(CONFIG_OPROFILE)	+= arch/powerpc/oprofile/
arch/ppc64/kernel/Makefile

@@ -7,7 +7,7 @@ ifneq ($(CONFIG_PPC_MERGE),y)

 EXTRA_CFLAGS	+= -mno-minimal-toc
 extra-y		:= head.o vmlinux.lds

-obj-y		:= entry.o misc.o prom.o
+obj-y		:= misc.o prom.o

 endif
arch/ppc64/kernel/entry.S deleted (100644 → 0)
/*
 *  arch/ppc64/kernel/entry.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif
/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

.SYS_CALL_TABLE32:
	.tc .sys_call_table32[TC],.sys_call_table32

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
andi
.
r10
,
r12
,
MSR_PR
mr
r10
,
r1
addi
r1
,
r1
,-
INT_FRAME_SIZE
beq
-
1
f
ld
r1
,
PACAKSAVE
(
r13
)
1
:
std
r10
,
0
(
r1
)
std
r11
,
_NIP
(
r1
)
std
r12
,
_MSR
(
r1
)
std
r0
,
GPR0
(
r1
)
std
r10
,
GPR1
(
r1
)
std
r2
,
GPR2
(
r1
)
std
r3
,
GPR3
(
r1
)
std
r4
,
GPR4
(
r1
)
std
r5
,
GPR5
(
r1
)
std
r6
,
GPR6
(
r1
)
std
r7
,
GPR7
(
r1
)
std
r8
,
GPR8
(
r1
)
li
r11
,
0
std
r11
,
GPR9
(
r1
)
std
r11
,
GPR10
(
r1
)
std
r11
,
GPR11
(
r1
)
std
r11
,
GPR12
(
r1
)
std
r9
,
GPR13
(
r1
)
crclr
so
mfcr
r9
mflr
r10
li
r11
,
0xc01
std
r9
,
_CCR
(
r1
)
std
r10
,
_LINK
(
r1
)
std
r11
,
_TRAP
(
r1
)
mfxer
r9
mfctr
r10
std
r9
,
_XER
(
r1
)
std
r10
,
_CTR
(
r1
)
std
r3
,
ORIG_GPR3
(
r1
)
ld
r2
,
PACATOC
(
r13
)
addi
r9
,
r1
,
STACK_FRAME_OVERHEAD
ld
r11
,
exception_marker
@
toc
(
r2
)
std
r11
,-
16
(
r9
)
/*
"regshere"
marker
*/
#ifdef CONFIG_PPC_ISERIES
/
*
Hack
for
handling
interrupts
when
soft
-
enabling
on
iSeries
*/
cmpdi
cr1
,
r0
,
0x5555
/*
syscall
0x5555
*/
andi
.
r10
,
r12
,
MSR_PR
/*
from
kernel
*/
crand
4
*
cr0
+
eq
,
4
*
cr1
+
eq
,
4
*
cr0
+
eq
beq
hardware_interrupt_entry
lbz
r10
,
PACAPROCENABLED
(
r13
)
std
r10
,
SOFTE
(
r1
)
#endif
mfmsr
r11
ori
r11
,
r11
,
MSR_EE
mtmsrd
r11
,
1
#ifdef SHOW_SYSCALLS
bl
.
do_show_syscall
REST_GPR
(0,
r1
)
REST_4GPRS
(3,
r1
)
REST_2GPRS
(7,
r1
)
addi
r9
,
r1
,
STACK_FRAME_OVERHEAD
#endif
clrrdi
r11
,
r1
,
THREAD_SHIFT
li
r12
,
0
ld
r10
,
TI_FLAGS
(
r11
)
stb
r12
,
TI_SC_NOERR
(
r11
)
andi
.
r11
,
r10
,
_TIF_SYSCALL_T_OR_A
bne
-
syscall_dotrace
syscall_dotrace_cont
:
cmpldi
0
,
r0
,
NR_syscalls
bge
-
syscall_enosys
system_call
:
/
*
label
this
so
stack
traces
look
sane
*/
/*
*
Need
to
vector
to
32
Bit
or
default
sys_call_table
here
,
*
based
on
caller
's run-mode / personality.
*/
ld
r11
,
.
SYS_CALL_TABLE
@
toc
(
2
)
andi
.
r10
,
r10
,
_TIF_32BIT
beq
15
f
ld
r11
,
.
SYS_CALL_TABLE32
@
toc
(
2
)
clrldi
r3
,
r3
,
32
clrldi
r4
,
r4
,
32
clrldi
r5
,
r5
,
32
clrldi
r6
,
r6
,
32
clrldi
r7
,
r7
,
32
clrldi
r8
,
r8
,
32
15
:
slwi
r0
,
r0
,
3
ldx
r10
,
r11
,
r0
/*
Fetch
system
call
handler
[
ptr
]
*/
mtctr
r10
bctrl
/*
Call
handler
*/
syscall_exit
:
#ifdef SHOW_SYSCALLS
std
r3
,
GPR3
(
r1
)
bl
.
do_show_syscall_exit
ld
r3
,
GPR3
(
r1
)
#endif
std
r3
,
RESULT
(
r1
)
ld
r5
,
_CCR
(
r1
)
li
r10
,-
_LAST_ERRNO
cmpld
r3
,
r10
clrrdi
r12
,
r1
,
THREAD_SHIFT
bge
-
syscall_error
syscall_error_cont
:
/
*
check
for
syscall
tracing
or
audit
*/
ld
r9
,
TI_FLAGS
(
r12
)
andi
.
r0
,
r9
,(
_TIF_SYSCALL_T_OR_A
|
_TIF_SINGLESTEP
)
bne
-
syscall_exit_trace
syscall_exit_trace_cont
:
/
*
disable
interrupts
so
current_thread_info
()->
flags
can
't change,
and
so
that
we
don
't get interrupted after loading SRR0/1. */
ld
r8
,
_MSR
(
r1
)
andi
.
r10
,
r8
,
MSR_RI
beq
-
unrecov_restore
mfmsr
r10
rldicl
r10
,
r10
,
48
,
1
rotldi
r10
,
r10
,
16
mtmsrd
r10
,
1
ld
r9
,
TI_FLAGS
(
r12
)
andi
.
r0
,
r9
,(
_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED
)
bne
-
syscall_exit_work
ld
r7
,
_NIP
(
r1
)
stdcx
.
r0
,
0
,
r1
/*
to
clear
the
reservation
*/
andi
.
r6
,
r8
,
MSR_PR
ld
r4
,
_LINK
(
r1
)
beq
-
1
f
/*
only
restore
r13
if
*/
ld
r13
,
GPR13
(
r1
)
/*
returning
to
usermode
*/
1
:
ld
r2
,
GPR2
(
r1
)
li
r12
,
MSR_RI
andc
r10
,
r10
,
r12
mtmsrd
r10
,
1
/*
clear
MSR
.
RI
*/
ld
r1
,
GPR1
(
r1
)
mtlr
r4
mtcr
r5
mtspr
SPRN_SRR0
,
r7
mtspr
SPRN_SRR1
,
r8
rfid
b
.
/*
prevent
speculative
execution
*/
syscall_enosys
:
li
r3
,-
ENOSYS
std
r3
,
RESULT
(
r1
)
clrrdi
r12
,
r1
,
THREAD_SHIFT
ld
r5
,
_CCR
(
r1
)
syscall_error
:
lbz
r11
,
TI_SC_NOERR
(
r12
)
cmpwi
0
,
r11
,
0
bne
-
syscall_error_cont
neg
r3
,
r3
oris
r5
,
r5
,
0x1000
/*
Set
SO
bit
in
CR
*/
std
r5
,
_CCR
(
r1
)
b
syscall_error_cont
/*
Traced
system
call
support
*/
syscall_dotrace
:
bl
.
save_nvgprs
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
do_syscall_trace_enter
ld
r0
,
GPR0
(
r1
)
/*
Restore
original
registers
*/
ld
r3
,
GPR3
(
r1
)
ld
r4
,
GPR4
(
r1
)
ld
r5
,
GPR5
(
r1
)
ld
r6
,
GPR6
(
r1
)
ld
r7
,
GPR7
(
r1
)
ld
r8
,
GPR8
(
r1
)
addi
r9
,
r1
,
STACK_FRAME_OVERHEAD
clrrdi
r10
,
r1
,
THREAD_SHIFT
ld
r10
,
TI_FLAGS
(
r10
)
b
syscall_dotrace_cont
syscall_exit_trace
:
std
r3
,
GPR3
(
r1
)
bl
.
save_nvgprs
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
do_syscall_trace_leave
REST_NVGPRS
(
r1
)
ld
r3
,
GPR3
(
r1
)
ld
r5
,
_CCR
(
r1
)
clrrdi
r12
,
r1
,
THREAD_SHIFT
b
syscall_exit_trace_cont
/*
Stuff
to
do
on
exit
from
a
system
call
.
*/
syscall_exit_work
:
std
r3
,
GPR3
(
r1
)
std
r5
,
_CCR
(
r1
)
b
.
ret_from_except_lite
/*
Save
non
-
volatile
GPRs
,
if
not
already
saved
.
*/
_GLOBAL
(
save_nvgprs
)
ld
r11
,
_TRAP
(
r1
)
andi
.
r0
,
r11
,
1
beqlr
-
SAVE_NVGPRS
(
r1
)
clrrdi
r0
,
r11
,
1
std
r0
,
_TRAP
(
r1
)
blr
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */
_GLOBAL(ppc32_sigsuspend)
	bl	.save_nvgprs
	bl	.compat_sys_sigsuspend
	b	70f
_GLOBAL
(
ppc64_rt_sigsuspend
)
bl
.
save_nvgprs
bl
.
sys_rt_sigsuspend
b
70
f
_GLOBAL
(
ppc32_rt_sigsuspend
)
bl
.
save_nvgprs
bl
.
compat_sys_rt_sigsuspend
70
:
cmpdi
0
,
r3
,
0
/
*
If
it
returned
an
error
,
we
need
to
return
via
syscall_exit
to
set
the
SO
bit
in
cr0
and
potentially
stop
for
ptrace
.
*/
bne
syscall_exit
/
*
If
sigsuspend
()
returns
zero
,
we
are
going
into
a
signal
handler
.
We
may
need
to
call
audit_syscall_exit
()
to
mark
the
exit
from
sigsuspend
()
*/
#ifdef CONFIG_AUDIT
ld
r3
,
PACACURRENT
(
r13
)
ld
r4
,
AUDITCONTEXT
(
r3
)
cmpdi
0
,
r4
,
0
beq
.
ret_from_except
/*
No
audit_context
:
Leave
immediately
.
*/
li
r4
,
2
/*
AUDITSC_FAILURE
*/
li
r5
,-
4
/*
It
's always -EINTR */
bl
.
audit_syscall_exit
#endif
b
.
ret_from_except
_GLOBAL
(
ppc_fork
)
bl
.
save_nvgprs
bl
.
sys_fork
b
syscall_exit
_GLOBAL
(
ppc_vfork
)
bl
.
save_nvgprs
bl
.
sys_vfork
b
syscall_exit
_GLOBAL
(
ppc_clone
)
bl
.
save_nvgprs
bl
.
sys_clone
b
syscall_exit
_GLOBAL
(
ppc32_swapcontext
)
bl
.
save_nvgprs
bl
.
compat_sys_swapcontext
b
80
f
_GLOBAL
(
ppc64_swapcontext
)
bl
.
save_nvgprs
bl
.
sys_swapcontext
b
80
f
_GLOBAL
(
ppc32_sigreturn
)
bl
.
compat_sys_sigreturn
b
80
f
_GLOBAL
(
ppc32_rt_sigreturn
)
bl
.
compat_sys_rt_sigreturn
b
80
f
_GLOBAL
(
ppc64_rt_sigreturn
)
bl
.
sys_rt_sigreturn
80
:
cmpdi
0
,
r3
,
0
blt
syscall_exit
clrrdi
r4
,
r1
,
THREAD_SHIFT
ld
r4
,
TI_FLAGS
(
r4
)
andi
.
r4
,
r4
,(
_TIF_SYSCALL_T_OR_A
|
_TIF_SINGLESTEP
)
beq
+
81
f
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
do_syscall_trace_leave
81
:
b
.
ret_from_except
_GLOBAL
(
ret_from_fork
)
bl
.
schedule_tail
REST_NVGPRS
(
r1
)
li
r3
,
0
b
syscall_exit
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc64/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
mflr
r0
std
r0
,
16
(
r1
)
stdu
r1
,-
SWITCH_FRAME_SIZE
(
r1
)
/
*
r3
-
r13
are
caller
saved
--
Cort
*/
SAVE_8GPRS
(14,
r1
)
SAVE_10GPRS
(22,
r1
)
mflr
r20
/*
Return
to
switch
caller
*/
mfmsr
r22
li
r0
,
MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
oris
r0
,
r0
,
MSR_VEC
@
h
/*
Disable
altivec
*/
mfspr
r24
,
SPRN_VRSAVE
/*
save
vrsave
register
value
*/
std
r24
,
THREAD_VRSAVE
(
r3
)
END_FTR_SECTION_IFSET
(
CPU_FTR_ALTIVEC
)
#endif /* CONFIG_ALTIVEC */
and
.
r0
,
r0
,
r22
beq
+
1
f
andc
r22
,
r22
,
r0
mtmsrd
r22
isync
1
:
std
r20
,
_NIP
(
r1
)
mfcr
r23
std
r23
,
_CCR
(
r1
)
std
r1
,
KSP
(
r3
)
/*
Set
old
stack
pointer
*/
#ifdef CONFIG_SMP
/
*
We
need
a
sync
somewhere
here
to
make
sure
that
if
the
*
previous
task
gets
rescheduled
on
another
CPU
,
it
sees
all
*
stores
it
has
performed
on
this
one
.
*/
sync
#endif /* CONFIG_SMP */
addi
r6
,
r4
,-
THREAD
/*
Convert
THREAD
to
'current'
*/
std
r6
,
PACACURRENT
(
r13
)
/*
Set
new
'current'
*/
ld
r8
,
KSP
(
r4
)
/*
new
stack
pointer
*/
BEGIN_FTR_SECTION
clrrdi
r6
,
r8
,
28
/*
get
its
ESID
*/
clrrdi
r9
,
r1
,
28
/*
get
current
sp
ESID
*/
clrldi
.
r0
,
r6
,
2
/*
is
new
ESID
c00000000
?
*/
cmpd
cr1
,
r6
,
r9
/*
or
is
new
ESID
the
same
as
current
ESID
?
*/
cror
eq
,
4
*
cr1
+
eq
,
eq
beq
2
f
/*
if
yes
,
don
't slbie it */
/
*
Bolt
in
the
new
stack
SLB
entry
*/
ld
r7
,
KSP_VSID
(
r4
)
/*
Get
new
stack
's VSID */
oris
r0
,
r6
,(
SLB_ESID_V
)
@
h
ori
r0
,
r0
,(
SLB_NUM_BOLTED
-
1
)
@
l
slbie
r6
slbie
r6
/*
Workaround
POWER5
<
DD2
.1
issue
*/
slbmte
r7
,
r0
isync
2
:
END_FTR_SECTION_IFSET
(
CPU_FTR_SLB
)
clrrdi
r7
,
r8
,
THREAD_SHIFT
/*
base
of
new
stack
*/
/
*
Note
:
this
uses
SWITCH_FRAME_SIZE
rather
than
INT_FRAME_SIZE
because
we
don
't need to leave the 288-byte ABI gap at the
top
of
the
kernel
stack
.
*/
addi
r7
,
r7
,
THREAD_SIZE
-
SWITCH_FRAME_SIZE
mr
r1
,
r8
/*
start
using
new
stack
pointer
*/
std
r7
,
PACAKSAVE
(
r13
)
ld
r6
,
_CCR
(
r1
)
mtcrf
0xFF
,
r6
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
ld
r0
,
THREAD_VRSAVE
(
r4
)
mtspr
SPRN_VRSAVE
,
r0
/*
if
G4
,
restore
VRSAVE
reg
*/
END_FTR_SECTION_IFSET
(
CPU_FTR_ALTIVEC
)
#endif /* CONFIG_ALTIVEC */
/
*
r3
-
r13
are
destroyed
--
Cort
*/
REST_8GPRS
(14,
r1
)
REST_10GPRS
(22,
r1
)
/
*
convert
old
thread
to
its
task_struct
for
return
value
*/
addi
r3
,
r3
,-
THREAD
ld
r7
,
_NIP
(
r1
)
/*
Return
to
_switch
caller
in
new
task
*/
mtlr
r7
addi
r1
,
r1
,
SWITCH_FRAME_SIZE
blr
.
align
7
_GLOBAL
(
ret_from_except
)
ld
r11
,
_TRAP
(
r1
)
andi
.
r0
,
r11
,
1
bne
.
ret_from_except_lite
REST_NVGPRS
(
r1
)
_GLOBAL
(
ret_from_except_lite
)
/
*
*
Disable
interrupts
so
that
current_thread_info
()->
flags
*
can
't change between when we test it and when we return
*
from
the
interrupt
.
*/
mfmsr
r10
/*
Get
current
interrupt
state
*/
rldicl
r9
,
r10
,
48
,
1
/*
clear
MSR_EE
*/
rotldi
r9
,
r9
,
16
mtmsrd
r9
,
1
/*
Update
machine
state
*/
#ifdef CONFIG_PREEMPT
clrrdi
r9
,
r1
,
THREAD_SHIFT
/*
current_thread_info
()
*/
li
r0
,
_TIF_NEED_RESCHED
/*
bits
to
check
*/
ld
r3
,
_MSR
(
r1
)
ld
r4
,
TI_FLAGS
(
r9
)
/
*
Move
MSR_PR
bit
in
r3
to
_TIF_SIGPENDING
position
in
r0
*/
rlwimi
r0
,
r3
,
32
+
TIF_SIGPENDING
-
MSR_PR_LG
,
_TIF_SIGPENDING
and
.
r0
,
r4
,
r0
/*
check
NEED_RESCHED
and
maybe
SIGPENDING
*/
bne
do_work
#else /* !CONFIG_PREEMPT */
ld
r3
,
_MSR
(
r1
)
/*
Returning
to
user
mode
?
*/
andi
.
r3
,
r3
,
MSR_PR
beq
restore
/*
if
not
,
just
restore
regs
and
return
*/
/
*
Check
current_thread_info
()->
flags
*/
clrrdi
r9
,
r1
,
THREAD_SHIFT
ld
r4
,
TI_FLAGS
(
r9
)
andi
.
r0
,
r4
,
_TIF_USER_WORK_MASK
bne
do_work
#endif
restore
:
#ifdef CONFIG_PPC_ISERIES
ld
r5
,
SOFTE
(
r1
)
cmpdi
0
,
r5
,
0
beq
4
f
/
*
Check
for
pending
interrupts
(
iSeries
)
*/
ld
r3
,
PACALPPACA
+
LPPACAANYINT
(
r13
)
cmpdi
r3
,
0
beq
+
4
f
/*
skip
do_IRQ
if
no
interrupts
*/
li
r3
,
0
stb
r3
,
PACAPROCENABLED
(
r13
)
/*
ensure
we
are
soft
-
disabled
*/
ori
r10
,
r10
,
MSR_EE
mtmsrd
r10
/*
hard
-
enable
again
*/
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
do_IRQ
b
.
ret_from_except_lite
/*
loop
back
and
handle
more
*/
4
:
stb
r5
,
PACAPROCENABLED
(
r13
)
#endif
ld
r3
,
_MSR
(
r1
)
andi
.
r0
,
r3
,
MSR_RI
beq
-
unrecov_restore
andi
.
r0
,
r3
,
MSR_PR
/
*
*
r13
is
our
per
cpu
area
,
only
restore
it
if
we
are
returning
to
*
userspace
*/
beq
1
f
REST_GPR
(13,
r1
)
1
:
ld
r3
,
_CTR
(
r1
)
ld
r0
,
_LINK
(
r1
)
mtctr
r3
mtlr
r0
ld
r3
,
_XER
(
r1
)
mtspr
SPRN_XER
,
r3
REST_8GPRS
(5,
r1
)
stdcx
.
r0
,
0
,
r1
/*
to
clear
the
reservation
*/
mfmsr
r0
li
r2
,
MSR_RI
andc
r0
,
r0
,
r2
mtmsrd
r0
,
1
ld
r0
,
_MSR
(
r1
)
mtspr
SPRN_SRR1
,
r0
ld
r2
,
_CCR
(
r1
)
mtcrf
0xFF
,
r2
ld
r2
,
_NIP
(
r1
)
mtspr
SPRN_SRR0
,
r2
ld
r0
,
GPR0
(
r1
)
ld
r2
,
GPR2
(
r1
)
ld
r3
,
GPR3
(
r1
)
ld
r4
,
GPR4
(
r1
)
ld
r1
,
GPR1
(
r1
)
rfid
b
.
/*
prevent
speculative
execution
*/
/*
Note
:
this
must
change
if
we
start
using
the
TIF_NOTIFY_RESUME
bit
*/
do_work
:
#ifdef CONFIG_PREEMPT
andi
.
r0
,
r3
,
MSR_PR
/*
Returning
to
user
mode
?
*/
bne
user_work
/
*
Check
that
preempt_count
()
==
0
and
interrupts
are
enabled
*/
lwz
r8
,
TI_PREEMPT
(
r9
)
cmpwi
cr1
,
r8
,
0
#ifdef CONFIG_PPC_ISERIES
ld
r0
,
SOFTE
(
r1
)
cmpdi
r0
,
0
#else
andi
.
r0
,
r3
,
MSR_EE
#endif
crandc
eq
,
cr1
*
4
+
eq
,
eq
bne
restore
/
*
here
we
are
preempting
the
current
task
*/
1
:
#ifdef CONFIG_PPC_ISERIES
li
r0
,
1
stb
r0
,
PACAPROCENABLED
(
r13
)
#endif
ori
r10
,
r10
,
MSR_EE
mtmsrd
r10
,
1
/*
reenable
interrupts
*/
bl
.
preempt_schedule
mfmsr
r10
clrrdi
r9
,
r1
,
THREAD_SHIFT
rldicl
r10
,
r10
,
48
,
1
/*
disable
interrupts
again
*/
rotldi
r10
,
r10
,
16
mtmsrd
r10
,
1
ld
r4
,
TI_FLAGS
(
r9
)
andi
.
r0
,
r4
,
_TIF_NEED_RESCHED
bne
1
b
b
restore
user_work
:
#endif
/
*
Enable
interrupts
*/
ori
r10
,
r10
,
MSR_EE
mtmsrd
r10
,
1
andi
.
r0
,
r4
,
_TIF_NEED_RESCHED
beq
1
f
bl
.
schedule
b
.
ret_from_except_lite
1
:
bl
.
save_nvgprs
li
r3
,
0
addi
r4
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
do_signal
b
.
ret_from_except
unrecov_restore
:
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
unrecoverable_exception
b
unrecov_restore
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
mflr
r0
std
r0
,
16
(
r1
)
stdu
r1
,-
RTAS_FRAME_SIZE
(
r1
)
/*
Save
SP
and
create
stack
space
.
*/
/
*
Because
RTAS
is
running
in
32
b
mode
,
it
clobbers
the
high
order
half
*
of
all
registers
that
it
saves
.
We
therefore
save
those
registers
*
RTAS
might
touch
to
the
stack
.
(
r0
,
r3
-
r13
are
caller
saved
)
*/
SAVE_GPR
(2,
r1
)
/*
Save
the
TOC
*/
SAVE_GPR
(13,
r1
)
/*
Save
paca
*/
SAVE_8GPRS
(14,
r1
)
/*
Save
the
non
-
volatiles
*/
SAVE_10GPRS
(22,
r1
)
/*
ditto
*/
mfcr
r4
std
r4
,
_CCR
(
r1
)
mfctr
r5
std
r5
,
_CTR
(
r1
)
mfspr
r6
,
SPRN_XER
std
r6
,
_XER
(
r1
)
mfdar
r7
std
r7
,
_DAR
(
r1
)
mfdsisr
r8
std
r8
,
_DSISR
(
r1
)
mfsrr0
r9
std
r9
,
_SRR0
(
r1
)
mfsrr1
r10
std
r10
,
_SRR1
(
r1
)
/
*
There
is
no
way
it
is
acceptable
to
get
here
with
interrupts
enabled
,
*
check
it
with
the
asm
equivalent
of
WARN_ON
*/
mfmsr
r6
andi
.
r0
,
r6
,
MSR_EE
1
:
tdnei
r0
,
0
.
section
__bug_table
,"
a
"
.
llong
1
b
,
__LINE__
+
0x1000000
,
1
f
,
2
f
.
previous
.
section
.
rodata
,"
a
"
1
:
.
asciz
__FILE__
2
:
.
asciz
"enter_rtas"
.
previous
/
*
Unfortunately
,
the
stack
pointer
and
the
MSR
are
also
clobbered
,
*
so
they
are
saved
in
the
PACA
which
allows
us
to
restore
*
our
original
state
after
RTAS
returns
.
*/
std
r1
,
PACAR1
(
r13
)
std
r6
,
PACASAVEDMSR
(
r13
)
/
*
Setup
our
real
return
addr
*/
SET_REG_TO_LABEL
(
r4
,.
rtas_return_loc
)
SET_REG_TO_CONST
(
r9
,
KERNELBASE
)
sub
r4
,
r4
,
r9
mtlr
r4
li
r0
,
0
ori
r0
,
r0
,
MSR_EE|MSR_SE|MSR_BE
|
MSR_RI
andc
r0
,
r6
,
r0
li
r9
,
1
rldicr
r9
,
r9
,
MSR_SF_LG
,(
63
-
MSR_SF_LG
)
ori
r9
,
r9
,
MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
andc
r6
,
r0
,
r9
ori
r6
,
r6
,
MSR_RI
sync
/*
disable
interrupts
so
SRR0
/
1
*/
mtmsrd
r0
/*
don
't get trashed */
SET_REG_TO_LABEL
(
r4
,
rtas
)
ld
r5
,
RTASENTRY
(
r4
)
/*
get
the
rtas
->
entry
value
*/
ld
r4
,
RTASBASE
(
r4
)
/*
get
the
rtas
->
base
value
*/
mtspr
SPRN_SRR0
,
r5
mtspr
SPRN_SRR1
,
r6
rfid
b
.
/*
prevent
speculative
execution
*/
_STATIC
(
rtas_return_loc
)
/
*
relocation
is
off
at
this
point
*/
mfspr
r4
,
SPRN_SPRG3
/*
Get
PACA
*/
SET_REG_TO_CONST
(
r5
,
KERNELBASE
)
sub
r4
,
r4
,
r5
/*
RELOC
the
PACA
base
pointer
*/
mfmsr
r6
li
r0
,
MSR_RI
andc
r6
,
r6
,
r0
sync
mtmsrd
r6
ld
r1
,
PACAR1
(
r4
)
/*
Restore
our
SP
*/
LOADADDR
(
r3
,.
rtas_restore_regs
)
ld
r4
,
PACASAVEDMSR
(
r4
)
/*
Restore
our
MSR
*/
mtspr
SPRN_SRR0
,
r3
mtspr
SPRN_SRR1
,
r4
rfid
b
.
/*
prevent
speculative
execution
*/
_STATIC
(
rtas_restore_regs
)
/
*
relocation
is
on
at
this
point
*/
REST_GPR
(2,
r1
)
/*
Restore
the
TOC
*/
REST_GPR
(13,
r1
)
/*
Restore
paca
*/
REST_8GPRS
(14,
r1
)
/*
Restore
the
non
-
volatiles
*/
REST_10GPRS
(22,
r1
)
/*
ditto
*/
mfspr
r13
,
SPRN_SPRG3
ld
r4
,
_CCR
(
r1
)
mtcr
r4
ld
r5
,
_CTR
(
r1
)
mtctr
r5
ld
r6
,
_XER
(
r1
)
mtspr
SPRN_XER
,
r6
ld
r7
,
_DAR
(
r1
)
mtdar
r7
ld
r8
,
_DSISR
(
r1
)
mtdsisr
r8
ld
r9
,
_SRR0
(
r1
)
mtsrr0
r9
ld
r10
,
_SRR1
(
r1
)
mtsrr1
r10
addi
r1
,
r1
,
RTAS_FRAME_SIZE
/*
Unstack
our
frame
*/
ld
r0
,
16
(
r1
)
/*
get
return
address
*/
mtlr
r0
blr
/*
return
to
caller
*/
#endif /* CONFIG_PPC_RTAS */
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL
(
enter_prom
)
mflr
r0
std
r0
,
16
(
r1
)
stdu
r1
,-
PROM_FRAME_SIZE
(
r1
)
/*
Save
SP
and
create
stack
space
*/
/
*
Because
PROM
is
running
in
32
b
mode
,
it
clobbers
the
high
order
half
*
of
all
registers
that
it
saves
.
We
therefore
save
those
registers
*
PROM
might
touch
to
the
stack
.
(
r0
,
r3
-
r13
are
caller
saved
)
*/
SAVE_8GPRS
(2,
r1
)
SAVE_GPR
(13,
r1
)
SAVE_8GPRS
(14,
r1
)
SAVE_10GPRS
(22,
r1
)
mfcr
r4
std
r4
,
_CCR
(
r1
)
mfctr
r5
std
r5
,
_CTR
(
r1
)
mfspr
r6
,
SPRN_XER
std
r6
,
_XER
(
r1
)
mfdar
r7
std
r7
,
_DAR
(
r1
)
mfdsisr
r8
std
r8
,
_DSISR
(
r1
)
mfsrr0
r9
std
r9
,
_SRR0
(
r1
)
mfsrr1
r10
std
r10
,
_SRR1
(
r1
)
mfmsr
r11
std
r11
,
_MSR
(
r1
)
/
*
Get
the
PROM
entrypoint
*/
ld
r0
,
GPR4
(
r1
)
mtlr
r0
/
*
Switch
MSR
to
32
bits
mode
*/
mfmsr
r11
li
r12
,
1
rldicr
r12
,
r12
,
MSR_SF_LG
,(
63
-
MSR_SF_LG
)
andc
r11
,
r11
,
r12
li
r12
,
1
rldicr
r12
,
r12
,
MSR_ISF_LG
,(
63
-
MSR_ISF_LG
)
andc
r11
,
r11
,
r12
mtmsrd
r11
isync
/
*
Restore
arguments
&
enter
PROM
here
...
*/
ld
r3
,
GPR3
(
r1
)
blrl
/
*
Just
make
sure
that
r1
top
32
bits
didn
't get
*
corrupt
by
OF
*/
rldicl
r1
,
r1
,
0
,
32
/
*
Restore
the
MSR
(
back
to
64
bits
)
*/
ld
r0
,
_MSR
(
r1
)
mtmsrd
r0
isync
/
*
Restore
other
registers
*/
REST_GPR
(2,
r1
)
REST_GPR
(13,
r1
)
REST_8GPRS
(14,
r1
)
REST_10GPRS
(22,
r1
)
ld
r4
,
_CCR
(
r1
)
mtcr
r4
ld
r5
,
_CTR
(
r1
)
mtctr
r5
ld
r6
,
_XER
(
r1
)
mtspr
SPRN_XER
,
r6
ld
r7
,
_DAR
(
r1
)
mtdar
r7
ld
r8
,
_DSISR
(
r1
)
mtdsisr
r8
ld
r9
,
_SRR0
(
r1
)
mtsrr0
r9
ld
r10
,
_SRR1
(
r1
)
mtsrr1
r10
addi
r1
,
r1
,
PROM_FRAME_SIZE
ld
r0
,
16
(
r1
)
mtlr
r0
blr
#endif /* CONFIG_PPC_MULTIPLATFORM */
arch/ppc64/kernel/misc.S

@@ -867,566 +867,3 @@ _GLOBAL(kexec_sequence)
 	li	r5,0
 	blr
 /*	image->start(physid, image->start, 0);	*/
 #endif /* CONFIG_KEXEC */
-
-/* Why isn't this a) automatic, b) written in 'C'? */
-	.balign 8
-_GLOBAL(sys_call_table32)
-	.llong .sys_restart_syscall	/* 0 */
.
llong
.
sys_exit
.
llong
.
ppc_fork
.
llong
.
sys_read
.
llong
.
sys_write
.
llong
.
compat_sys_open
/*
5
*/
.
llong
.
sys_close
.
llong
.
compat_sys_waitpid
.
llong
.
compat_sys_creat
.
llong
.
sys_link
.
llong
.
sys_unlink
/*
10
*/
.
llong
.
compat_sys_execve
.
llong
.
sys_chdir
.
llong
.
compat_sys_time
.
llong
.
sys_mknod
.
llong
.
sys_chmod
/*
15
*/
.
llong
.
sys_lchown
.
llong
.
sys_ni_syscall
/*
old
break
syscall
*/
.
llong
.
sys_ni_syscall
/*
old
stat
syscall
*/
.
llong
.
ppc32_lseek
.
llong
.
sys_getpid
/*
20
*/
.
llong
.
compat_sys_mount
.
llong
.
sys_oldumount
.
llong
.
sys_setuid
.
llong
.
sys_getuid
.
llong
.
compat_sys_stime
/*
25
*/
.
llong
.
compat_sys_ptrace
.
llong
.
sys_alarm
.
llong
.
sys_ni_syscall
/*
old
fstat
syscall
*/
.
llong
.
compat_sys_pause
.
llong
.
compat_sys_utime
/*
30
*/
.
llong
.
sys_ni_syscall
/*
old
stty
syscall
*/
.
llong
.
sys_ni_syscall
/*
old
gtty
syscall
*/
.
llong
.
compat_sys_access
.
llong
.
compat_sys_nice
.
llong
.
sys_ni_syscall
/*
35
-
old
ftime
syscall
*/
.
llong
.
sys_sync
.
llong
.
compat_sys_kill
.
llong
.
sys_rename
.
llong
.
compat_sys_mkdir
.
llong
.
sys_rmdir
/*
40
*/
.
llong
.
sys_dup
.
llong
.
sys_pipe
.
llong
.
compat_sys_times
.
llong
.
sys_ni_syscall
/*
old
prof
syscall
*/
.
llong
.
sys_brk
/*
45
*/
.
llong
.
sys_setgid
.
llong
.
sys_getgid
.
llong
.
sys_signal
.
llong
.
sys_geteuid
.
llong
.
sys_getegid
/*
50
*/
.
llong
.
sys_acct
.
llong
.
sys_umount
.
llong
.
sys_ni_syscall
/*
old
lock
syscall
*/
.
llong
.
compat_sys_ioctl
.
llong
.
compat_sys_fcntl
/*
55
*/
.
llong
.
sys_ni_syscall
/*
old
mpx
syscall
*/
.
llong
.
compat_sys_setpgid
.
llong
.
sys_ni_syscall
/*
old
ulimit
syscall
*/
.
llong
.
sys_olduname
.
llong
.
compat_sys_umask
/*
60
*/
.
llong
.
sys_chroot
.
llong
.
sys_ustat
.
llong
.
sys_dup2
.
llong
.
sys_getppid
.
llong
.
sys_getpgrp
/*
65
*/
.
llong
.
sys_setsid
.
llong
.
compat_sys_sigaction
.
llong
.
sys_sgetmask
.
llong
.
compat_sys_ssetmask
.
llong
.
sys_setreuid
/*
70
*/
.
llong
.
sys_setregid
.
llong
.
ppc32_sigsuspend
.
llong
.
compat_sys_sigpending
.
llong
.
compat_sys_sethostname
.
llong
.
compat_sys_setrlimit
/*
75
*/
.
llong
.
compat_sys_old_getrlimit
.
llong
.
compat_sys_getrusage
.
llong
.
compat_sys_gettimeofday
.
llong
.
compat_sys_settimeofday
.
llong
.
compat_sys_getgroups
/*
80
*/
.
llong
.
compat_sys_setgroups
.
llong
.
sys_ni_syscall
/*
old
select
syscall
*/
.
llong
.
sys_symlink
.
llong
.
sys_ni_syscall
/*
old
lstat
syscall
*/
.
llong
.
compat_sys_readlink
/*
85
*/
.
llong
.
sys_uselib
.
llong
.
sys_swapon
.
llong
.
sys_reboot
.
llong
.
old32_readdir
.
llong
.
sys_mmap
/*
90
*/
.
llong
.
sys_munmap
.
llong
.
sys_truncate
.
llong
.
sys_ftruncate
.
llong
.
sys_fchmod
.
llong
.
sys_fchown
/*
95
*/
.
llong
.
compat_sys_getpriority
.
llong
.
compat_sys_setpriority
.
llong
.
sys_ni_syscall
/*
old
profil
syscall
*/
.
llong
.
compat_sys_statfs
.
llong
.
compat_sys_fstatfs
/*
100
*/
.
llong
.
sys_ni_syscall
/*
old
ioperm
syscall
*/
.
llong
.
compat_sys_socketcall
.
llong
.
compat_sys_syslog
.
llong
.
compat_sys_setitimer
.
llong
.
compat_sys_getitimer
/*
105
*/
.
llong
.
compat_sys_newstat
.
llong
.
compat_sys_newlstat
.
llong
.
compat_sys_newfstat
.
llong
.
sys_uname
.
llong
.
sys_ni_syscall
/*
110
old
iopl
syscall
*/
.
llong
.
sys_vhangup
.
llong
.
sys_ni_syscall
/*
old
idle
syscall
*/
.
llong
.
sys_ni_syscall
/*
old
vm86
syscall
*/
.
llong
.
compat_sys_wait4
.
llong
.
sys_swapoff
/*
115
*/
.
llong
.
compat_sys_sysinfo
.
llong
.
sys32_ipc
.
llong
.
sys_fsync
.
llong
.
ppc32_sigreturn
.
llong
.
ppc_clone
/*
120
*/
.
llong
.
compat_sys_setdomainname
.
llong
.
ppc_newuname
.
llong
.
sys_ni_syscall
/*
old
modify_ldt
syscall
*/
.
llong
.
compat_sys_adjtimex
.
llong
.
sys_mprotect
/*
125
*/
.
llong
.
compat_sys_sigprocmask
.
llong
.
sys_ni_syscall
/*
old
create_module
syscall
*/
.
llong
.
sys_init_module
.
llong
.
sys_delete_module
.
llong
.
sys_ni_syscall
/*
130
old
get_kernel_syms
syscall
*/
.
llong
.
sys_quotactl
.
llong
.
compat_sys_getpgid
.
llong
.
sys_fchdir
.
llong
.
sys_bdflush
.
llong
.
compat_sys_sysfs
/*
135
*/
.
llong
.
ppc64_personality
.
llong
.
sys_ni_syscall
/*
for
afs_syscall
*/
.
llong
.
sys_setfsuid
.
llong
.
sys_setfsgid
.
llong
.
sys_llseek
/*
140
*/
.
llong
.
compat_sys_getdents
.
llong
.
ppc32_select
.
llong
.
sys_flock
.
llong
.
sys_msync
.
llong
.
compat_sys_readv
/*
145
*/
.
llong
.
compat_sys_writev
.
llong
.
compat_sys_getsid
.
llong
.
sys_fdatasync
.
llong
.
compat_sys_sysctl
.
llong
.
sys_mlock
/*
150
*/
.
llong
.
sys_munlock
.
llong
.
sys_mlockall
.
llong
.
sys_munlockall
.
llong
.
compat_sys_sched_setparam
.
llong
.
compat_sys_sched_getparam
/*
155
*/
.
llong
.
compat_sys_sched_setscheduler
.
llong
.
compat_sys_sched_getscheduler
.
llong
.
sys_sched_yield
.
llong
.
compat_sys_sched_get_priority_max
.
llong
.
compat_sys_sched_get_priority_min
/*
160
*/
.
llong
.
compat_sys_sched_rr_get_interval
.
llong
.
compat_sys_nanosleep
.
llong
.
sys_mremap
.
llong
.
sys_setresuid
.
llong
.
sys_getresuid
/*
165
*/
.
llong
.
sys_ni_syscall
/*
old
query_module
syscall
*/
.
llong
.
sys_poll
.
llong
.
compat_sys_nfsservctl
.
llong
.
sys_setresgid
.
llong
.
sys_getresgid
/*
170
*/
.
llong
.
compat_sys_prctl
.
llong
.
ppc32_rt_sigreturn
.
llong
.
compat_sys_rt_sigaction
.
llong
.
compat_sys_rt_sigprocmask
.
llong
.
compat_sys_rt_sigpending
/*
175
*/
.
llong
.
compat_sys_rt_sigtimedwait
.
llong
.
compat_sys_rt_sigqueueinfo
.
llong
.
ppc32_rt_sigsuspend
.
llong
.
compat_sys_pread64
.
llong
.
compat_sys_pwrite64
/*
180
*/
.
llong
.
sys_chown
.
llong
.
sys_getcwd
.
llong
.
sys_capget
.
llong
.
sys_capset
.
llong
.
compat_sys_sigaltstack
/*
185
*/
.
llong
.
compat_sys_sendfile
.
llong
.
sys_ni_syscall
/*
reserved
for
streams1
*/
.
llong
.
sys_ni_syscall
/*
reserved
for
streams2
*/
.
llong
.
ppc_vfork
.
llong
.
compat_sys_getrlimit
/*
190
*/
.
llong
.
compat_sys_readahead
.
llong
.
compat_sys_mmap2
.
llong
.
compat_sys_truncate64
.
llong
.
compat_sys_ftruncate64
.
llong
.
sys_stat64
/*
195
*/
.
llong
.
sys_lstat64
.
llong
.
sys_fstat64
.
llong
.
compat_sys_pciconfig_read
.
llong
.
compat_sys_pciconfig_write
.
llong
.
compat_sys_pciconfig_iobase
/*
200
-
pciconfig_iobase
*/
.
llong
.
sys_ni_syscall
/*
reserved
for
MacOnLinux
*/
.
llong
.
sys_getdents64
.
llong
.
sys_pivot_root
.
llong
.
compat_sys_fcntl64
.
llong
.
sys_madvise
/*
205
*/
.
llong
.
sys_mincore
.
llong
.
sys_gettid
.
llong
.
sys_tkill
.
llong
.
sys_setxattr
.
llong
.
sys_lsetxattr
/*
210
*/
.
llong
.
sys_fsetxattr
.
llong
.
sys_getxattr
.
llong
.
sys_lgetxattr
.
llong
.
sys_fgetxattr
.
llong
.
sys_listxattr
/*
215
*/
.
llong
.
sys_llistxattr
.
llong
.
sys_flistxattr
.
llong
.
sys_removexattr
.
llong
.
sys_lremovexattr
.
llong
.
sys_fremovexattr
/*
220
*/
.
llong
.
compat_sys_futex
.
llong
.
compat_sys_sched_setaffinity
.
llong
.
compat_sys_sched_getaffinity
.
llong
.
sys_ni_syscall
.
llong
.
sys_ni_syscall
/*
225
-
reserved
for
tux
*/
.
llong
.
compat_sys_sendfile64
.
llong
.
compat_sys_io_setup
.
llong
.
sys_io_destroy
.
llong
.
compat_sys_io_getevents
.
llong
.
compat_sys_io_submit
.
llong
.
sys_io_cancel
.
llong
.
sys_set_tid_address
.
llong
.
ppc32_fadvise64
.
llong
.
sys_exit_group
.
llong
.
ppc32_lookup_dcookie
/*
235
*/
.
llong
.
sys_epoll_create
.
llong
.
sys_epoll_ctl
.
llong
.
sys_epoll_wait
.
llong
.
sys_remap_file_pages
.
llong
.
ppc32_timer_create
/*
240
*/
.
llong
.
compat_sys_timer_settime
.
llong
.
compat_sys_timer_gettime
.
llong
.
sys_timer_getoverrun
.
llong
.
sys_timer_delete
.
llong
.
compat_sys_clock_settime
/*
245
*/
.
llong
.
compat_sys_clock_gettime
.
llong
.
compat_sys_clock_getres
.
llong
.
compat_sys_clock_nanosleep
.
llong
.
ppc32_swapcontext
.
llong
.
compat_sys_tgkill
/*
250
*/
.
llong
.
compat_sys_utimes
.
llong
.
compat_sys_statfs64
.
llong
.
compat_sys_fstatfs64
.
llong
.
ppc_fadvise64_64
/*
32
bit
only
fadvise64_64
*/
.
llong
.
ppc_rtas
/*
255
*/
.
llong
.
sys_ni_syscall
/*
256
reserved
for
sys_debug_setcontext
*/
.
llong
.
sys_ni_syscall
/*
257
reserved
for
vserver
*/
.
llong
.
sys_ni_syscall
/*
258
reserved
for
new
sys_remap_file_pages
*/
.
llong
.
compat_sys_mbind
.
llong
.
compat_sys_get_mempolicy
/*
260
*/
.
llong
.
compat_sys_set_mempolicy
.
llong
.
compat_sys_mq_open
.
llong
.
sys_mq_unlink
.
llong
.
compat_sys_mq_timedsend
.
llong
.
compat_sys_mq_timedreceive
/*
265
*/
.
llong
.
compat_sys_mq_notify
.
llong
.
compat_sys_mq_getsetattr
.
llong
.
compat_sys_kexec_load
.
llong
.
compat_sys_add_key
.
llong
.
compat_sys_request_key
/*
270
*/
.
llong
.
compat_sys_keyctl
.
llong
.
compat_sys_waitid
.
llong
.
compat_sys_ioprio_set
.
llong
.
compat_sys_ioprio_get
.
llong
.
sys_inotify_init
/*
275
*/
.
llong
.
sys_inotify_add_watch
.
llong
.
sys_inotify_rm_watch
.
balign
8
_GLOBAL
(
sys_call_table
)
.
llong
.
sys_restart_syscall
/*
0
*/
.
llong
.
sys_exit
.
llong
.
ppc_fork
.
llong
.
sys_read
.
llong
.
sys_write
.
llong
.
sys_open
/*
5
*/
.
llong
.
sys_close
.
llong
.
sys_waitpid
.
llong
.
sys_creat
.
llong
.
sys_link
.
llong
.
sys_unlink
/*
10
*/
.
llong
.
sys_execve
.
llong
.
sys_chdir
.
llong
.
sys64_time
.
llong
.
sys_mknod
.
llong
.
sys_chmod
/*
15
*/
.
llong
.
sys_lchown
.
llong
.
sys_ni_syscall
/*
old
break
syscall
*/
.
llong
.
sys_ni_syscall
/*
old
stat
syscall
*/
.
llong
.
sys_lseek
.
llong
.
sys_getpid
/*
20
*/
.
llong
.
sys_mount
.
llong
.
sys_ni_syscall
/*
old
umount
syscall
*/
.
llong
.
sys_setuid
.
llong
.
sys_getuid
.
llong
.
sys_stime
/*
25
*/
.
llong
.
sys_ptrace
.
llong
.
sys_alarm
.
llong
.
sys_ni_syscall
/*
old
fstat
syscall
*/
.
llong
.
sys_pause
.
llong
.
sys_utime
/*
30
*/
.
llong
.
sys_ni_syscall
/*
old
stty
syscall
*/
.
llong
.
sys_ni_syscall
/*
old
gtty
syscall
*/
.
llong
.
sys_access
.
llong
.
sys_nice
.
llong
.
sys_ni_syscall
/*
35
-
old
ftime
syscall
*/
.
llong
.
sys_sync
.
llong
.
sys_kill
.
llong
.
sys_rename
.
llong
.
sys_mkdir
.
llong
.
sys_rmdir
/*
40
*/
.
llong
.
sys_dup
.
llong
.
sys_pipe
.
llong
.
sys_times
.
llong
.
sys_ni_syscall
/*
old
prof
syscall
*/
.
llong
.
sys_brk
/*
45
*/
.
llong
.
sys_setgid
.
llong
.
sys_getgid
.
llong
.
sys_signal
.
llong
.
sys_geteuid
.
llong
.
sys_getegid
/*
50
*/
.
llong
.
sys_acct
.
llong
.
sys_umount
.
llong
.
sys_ni_syscall
/*
old
lock
syscall
*/
.
llong
.
sys_ioctl
.
llong
.
sys_fcntl
/*
55
*/
.
llong
.
sys_ni_syscall
/*
old
mpx
syscall
*/
.
llong
.
sys_setpgid
.
llong
.
sys_ni_syscall
/*
old
ulimit
syscall
*/
.
llong
.
sys_ni_syscall
/*
old
uname
syscall
*/
.
llong
.
sys_umask
/*
60
*/
.
llong
.
sys_chroot
.
llong
.
sys_ustat
.
llong
.
sys_dup2
.
llong
.
sys_getppid
.
llong
.
sys_getpgrp
/*
65
*/
.
llong
.
sys_setsid
.
llong
.
sys_ni_syscall
.
llong
.
sys_sgetmask
.
llong
.
sys_ssetmask
.
llong
.
sys_setreuid
/*
70
*/
.
llong
.
sys_setregid
.
llong
.
sys_ni_syscall
.
llong
.
sys_ni_syscall
.
llong
.
sys_sethostname
.
llong
.
sys_setrlimit
/*
75
*/
.
llong
.
sys_ni_syscall
/*
old
getrlimit
syscall
*/
.
llong
.
sys_getrusage
.
llong
.
sys_gettimeofday
.
llong
.
sys_settimeofday
.
llong
.
sys_getgroups
/*
80
*/
.
llong
.
sys_setgroups
.
llong
.
sys_ni_syscall
/*
old
select
syscall
*/
.
llong
.
sys_symlink
.
llong
.
sys_ni_syscall
/*
old
lstat
syscall
*/
.
llong
.
sys_readlink
/*
85
*/
.
llong
.
sys_uselib
.
llong
.
sys_swapon
.
llong
.
sys_reboot
.
llong
.
sys_ni_syscall
/*
old
readdir
syscall
*/
.
llong
.
sys_mmap
/*
90
*/
.
llong
.
sys_munmap
.
llong
.
sys_truncate
.
llong
.
sys_ftruncate
.
llong
.
sys_fchmod
.
llong
.
sys_fchown
/*
95
*/
.
llong
.
sys_getpriority
.
llong
.
sys_setpriority
.
llong
.
sys_ni_syscall
/*
old
profil
syscall
holder
*/
.
llong
.
sys_statfs
.
llong
.
sys_fstatfs
/*
100
*/
.
llong
.
sys_ni_syscall
/*
old
ioperm
syscall
*/
.
llong
.
sys_socketcall
.
llong
.
sys_syslog
.
llong
.
sys_setitimer
.
llong
.
sys_getitimer
/*
105
*/
.
llong
.
sys_newstat
.
llong
.
sys_newlstat
.
llong
.
sys_newfstat
.
llong
.
sys_ni_syscall
/*
old
uname
syscall
*/
.
llong
.
sys_ni_syscall
/*
110
old
iopl
syscall
*/
.
llong
.
sys_vhangup
.
llong
.
sys_ni_syscall
/*
old
idle
syscall
*/
.
llong
.
sys_ni_syscall
/*
old
vm86
syscall
*/
.
llong
.
sys_wait4
.
llong
.
sys_swapoff
/*
115
*/
.
llong
.
sys_sysinfo
.
llong
.
sys_ipc
.
llong
.
sys_fsync
.
llong
.
sys_ni_syscall
.
llong
.
ppc_clone
/*
120
*/
.
llong
.
sys_setdomainname
.
llong
.
ppc_newuname
.
llong
.
sys_ni_syscall
/*
old
modify_ldt
syscall
*/
.
llong
.
sys_adjtimex
.
llong
.
sys_mprotect
/*
125
*/
.
llong
.
sys_ni_syscall
.
llong
.
sys_ni_syscall
/*
old
create_module
syscall
*/
.
llong
.
sys_init_module
.
llong
.
sys_delete_module
.
llong
.
sys_ni_syscall
/*
130
old
get_kernel_syms
syscall
*/
.
llong
.
sys_quotactl
.
llong
.
sys_getpgid
.
llong
.
sys_fchdir
.
llong
.
sys_bdflush
.
llong
.
sys_sysfs
/*
135
*/
.
llong
.
ppc64_personality
.
llong
.
sys_ni_syscall
/*
for
afs_syscall
*/
.
llong
.
sys_setfsuid
.
llong
.
sys_setfsgid
.
llong
.
sys_llseek
/*
140
*/
.
llong
.
sys_getdents
.
llong
.
sys_select
.
llong
.
sys_flock
.
llong
.
sys_msync
.
llong
.
sys_readv
/*
145
*/
.
llong
.
sys_writev
.
llong
.
sys_getsid
.
llong
.
sys_fdatasync
.
llong
.
sys_sysctl
.
llong
.
sys_mlock
/*
150
*/
.
llong
.
sys_munlock
.
llong
.
sys_mlockall
.
llong
.
sys_munlockall
.
llong
.
sys_sched_setparam
.
llong
.
sys_sched_getparam
/*
155
*/
.
llong
.
sys_sched_setscheduler
.
llong
.
sys_sched_getscheduler
.
llong
.
sys_sched_yield
.
llong
.
sys_sched_get_priority_max
.
llong
.
sys_sched_get_priority_min
/*
160
*/
.
llong
.
sys_sched_rr_get_interval
.
llong
.
sys_nanosleep
.
llong
.
sys_mremap
.
llong
.
sys_setresuid
.
llong
.
sys_getresuid
/*
165
*/
.
llong
.
sys_ni_syscall
/*
old
query_module
syscall
*/
.
llong
.
sys_poll
.
llong
.
sys_nfsservctl
.
llong
.
sys_setresgid
.
llong
.
sys_getresgid
/*
170
*/
.
llong
.
sys_prctl
.
llong
.
ppc64_rt_sigreturn
.
llong
.
sys_rt_sigaction
.
llong
.
sys_rt_sigprocmask
.
llong
.
sys_rt_sigpending
/*
175
*/
.
llong
.
sys_rt_sigtimedwait
.
llong
.
sys_rt_sigqueueinfo
.
llong
.
ppc64_rt_sigsuspend
.
llong
.
sys_pread64
.
llong
.
sys_pwrite64
/*
180
*/
.
llong
.
sys_chown
.
llong
.
sys_getcwd
.
llong
.
sys_capget
.
llong
.
sys_capset
.
llong
.
sys_sigaltstack
/*
185
*/
.
llong
.
sys_sendfile64
.
llong
.
sys_ni_syscall
/*
reserved
for
streams1
*/
.
llong
.
sys_ni_syscall
/*
reserved
for
streams2
*/
.
llong
.
ppc_vfork
.
llong
.
sys_getrlimit
/*
190
*/
.
llong
.
sys_readahead
.
llong
.
sys_ni_syscall
/*
32
bit
only
mmap2
*/
.
llong
.
sys_ni_syscall
/*
32
bit
only
truncate64
*/
.
llong
.
sys_ni_syscall
/*
32
bit
only
ftruncate64
*/
.
llong
.
sys_ni_syscall
/*
195
-
32
bit
only
stat64
*/
.
llong
.
sys_ni_syscall
/*
32
bit
only
lstat64
*/
.
llong
.
sys_ni_syscall
/*
32
bit
only
fstat64
*/
.
llong
.
sys_pciconfig_read
.
llong
.
sys_pciconfig_write
.
llong
.
sys_pciconfig_iobase
/*
200
-
pciconfig_iobase
*/
.
llong
.
sys_ni_syscall
/*
reserved
for
MacOnLinux
*/
.
llong
.
sys_getdents64
.
llong
.
sys_pivot_root
.
llong
.
sys_ni_syscall
/*
32
bit
only
fcntl64
*/
.
llong
.
sys_madvise
/*
205
*/
.
llong
.
sys_mincore
.
llong
.
sys_gettid
.
llong
.
sys_tkill
.
llong
.
sys_setxattr
.
llong
.
sys_lsetxattr
/*
210
*/
.
llong
.
sys_fsetxattr
.
llong
.
sys_getxattr
.
llong
.
sys_lgetxattr
.
llong
.
sys_fgetxattr
.
llong
.
sys_listxattr
/*
215
*/
.
llong
.
sys_llistxattr
.
llong
.
sys_flistxattr
.
llong
.
sys_removexattr
.
llong
.
sys_lremovexattr
.
llong
.
sys_fremovexattr
/*
220
*/
.
llong
.
sys_futex
.
llong
.
sys_sched_setaffinity
.
llong
.
sys_sched_getaffinity
.
llong
.
sys_ni_syscall
.
llong
.
sys_ni_syscall
/*
225
-
reserved
for
tux
*/
.
llong
.
sys_ni_syscall
/*
32
bit
only
sendfile64
*/
.
llong
.
sys_io_setup
.
llong
.
sys_io_destroy
.
llong
.
sys_io_getevents
.
llong
.
sys_io_submit
/*
230
*/
.
llong
.
sys_io_cancel
.
llong
.
sys_set_tid_address
.
llong
.
sys_fadvise64
.
llong
.
sys_exit_group
.
llong
.
sys_lookup_dcookie
/*
235
*/
.
llong
.
sys_epoll_create
.
llong
.
sys_epoll_ctl
.
llong
.
sys_epoll_wait
.
llong
.
sys_remap_file_pages
.
llong
.
sys_timer_create
/*
240
*/
.
llong
.
sys_timer_settime
.
llong
.
sys_timer_gettime
.
llong
.
sys_timer_getoverrun
.
llong
.
sys_timer_delete
.
llong
.
sys_clock_settime
/*
245
*/
.
llong
.
sys_clock_gettime
.
llong
.
sys_clock_getres
.
llong
.
sys_clock_nanosleep
.
llong
.
ppc64_swapcontext
.
llong
.
sys_tgkill
/*
250
*/
.
llong
.
sys_utimes
.
llong
.
sys_statfs64
.
llong
.
sys_fstatfs64
.
llong
.
sys_ni_syscall
/*
32
bit
only
fadvise64_64
*/
.
llong
.
ppc_rtas
/*
255
*/
.
llong
.
sys_ni_syscall
/*
256
reserved
for
sys_debug_setcontext
*/
.
llong
.
sys_ni_syscall
/*
257
reserved
for
vserver
*/
.
llong
.
sys_ni_syscall
/*
258
reserved
for
new
sys_remap_file_pages
*/
.
llong
.
sys_mbind
.
llong
.
sys_get_mempolicy
/*
260
*/
.
llong
.
sys_set_mempolicy
.
llong
.
sys_mq_open
.
llong
.
sys_mq_unlink
.
llong
.
sys_mq_timedsend
.
llong
.
sys_mq_timedreceive
/*
265
*/
.
llong
.
sys_mq_notify
.
llong
.
sys_mq_getsetattr
.
llong
.
sys_kexec_load
.
llong
.
sys_add_key
.
llong
.
sys_request_key
/*
270
*/
.
llong
.
sys_keyctl
.
llong
.
sys_waitid
.
llong
.
sys_ioprio_set
.
llong
.
sys_ioprio_get
.
llong
.
sys_inotify_init
/*
275
*/
.
llong
.
sys_inotify_add_watch
.
llong
.
sys_inotify_rm_watch
arch/ppc64/lib/Makefile

@@ -2,17 +2,4 @@
 # Makefile for ppc64-specific library files..
 #

-lib-y := checksum.o string.o strcase.o
-lib-y += copypage.o memcpy.o copyuser.o usercopy.o
-
-# Lock primitives are defined as no-ops in include/linux/spinlock.h
-# for non-SMP configs. Don't build the real versions.
-lib-$(CONFIG_SMP) += locks.o
-
-# e2a provides EBCDIC to ASCII conversions.
-ifdef CONFIG_PPC_ISERIES
-obj-y += e2a.o
-endif
-
-lib-$(CONFIG_DEBUG_KERNEL) += sstep.o
+lib-y := string.o
arch/ppc64/lib/checksum.S deleted (100644 → 0)
/*
 * This file contains assembly-language implementations
 * of IP-style 1's complement checksum routines.
 *
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
 */
#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
/*
 * ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header
 * len is in words and is always >= 5.
 *
 * In practice len == 5, but this is not guaranteed.
 * So this code does not attempt to use doubleword instructions.
 */
_GLOBAL(ip_fast_csum)
lwz
r0
,
0
(
r3
)
lwzu
r5
,
4
(
r3
)
addic
.
r4
,
r4
,-
2
addc
r0
,
r0
,
r5
mtctr
r4
blelr
-
1
:
lwzu
r4
,
4
(
r3
)
adde
r0
,
r0
,
r4
bdnz
1
b
addze
r0
,
r0
/*
add
in
final
carry
*/
rldicl
r4
,
r0
,
32
,
0
/*
fold
two
32
-
bit
halves
together
*/
add
r0
,
r0
,
r4
srdi
r0
,
r0
,
32
rlwinm
r3
,
r0
,
16
,
0
,
31
/*
fold
two
halves
together
*/
add
r3
,
r0
,
r3
not
r3
,
r3
srwi
r3
,
r3
,
16
blr
/*
 * Compute checksum of TCP or UDP pseudo-header:
 *	csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
 * No real gain trying to do this specially for 64 bit, but
 * the 32 bit addition may spill into the upper bits of
 * the doubleword so we still must fold it down from 64.
 */
_GLOBAL(csum_tcpudp_magic)
rlwimi
r5
,
r6
,
16
,
0
,
15
/*
put
proto
in
upper
half
of
len
*/
addc
r0
,
r3
,
r4
/*
add
4
32
-
bit
words
together
*/
adde
r0
,
r0
,
r5
adde
r0
,
r0
,
r7
rldicl
r4
,
r0
,
32
,
0
/*
fold
64
bit
value
*/
add
r0
,
r4
,
r0
srdi
r0
,
r0
,
32
rlwinm
r3
,
r0
,
16
,
0
,
31
/*
fold
two
halves
together
*/
add
r3
,
r0
,
r3
not
r3
,
r3
srwi
r3
,
r3
,
16
blr
/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * This code assumes at least halfword alignment, though the length
 * can be any number of bytes.  The sum is accumulated in r5.
 *
 * csum_partial(r3=buff, r4=len, r5=sum)
 */
_GLOBAL(csum_partial)
subi
r3
,
r3
,
8
/*
we
'll offset by 8 for the loads */
srdi
.
r6
,
r4
,
3
/*
divide
by
8
for
doubleword
count
*/
addic
r5
,
r5
,
0
/*
clear
carry
*/
beq
3
f
/*
if
we
're doing < 8 bytes */
andi
.
r0
,
r3
,
2
/*
aligned
on
a
word
boundary
already
?
*/
beq
+
1
f
lhz
r6
,
8
(
r3
)
/*
do
2
bytes
to
get
aligned
*/
addi
r3
,
r3
,
2
subi
r4
,
r4
,
2
addc
r5
,
r5
,
r6
srdi
.
r6
,
r4
,
3
/*
recompute
number
of
doublewords
*/
beq
3
f
/*
any
left
?
*/
1
:
mtctr
r6
2
:
ldu
r6
,
8
(
r3
)
/*
main
sum
loop
*/
adde
r5
,
r5
,
r6
bdnz
2
b
andi
.
r4
,
r4
,
7
/*
compute
bytes
left
to
sum
after
doublewords
*/
3
:
cmpwi
0
,
r4
,
4
/*
is
at
least
a
full
word
left
?
*/
blt
4
f
lwz
r6
,
8
(
r3
)
/*
sum
this
word
*/
addi
r3
,
r3
,
4
subi
r4
,
r4
,
4
adde
r5
,
r5
,
r6
4
:
cmpwi
0
,
r4
,
2
/*
is
at
least
a
halfword
left
?
*/
blt
+
5
f
lhz
r6
,
8
(
r3
)
/*
sum
this
halfword
*/
addi
r3
,
r3
,
2
subi
r4
,
r4
,
2
adde
r5
,
r5
,
r6
5
:
cmpwi
0
,
r4
,
1
/*
is
at
least
a
byte
left
?
*/
bne
+
6
f
lbz
r6
,
8
(
r3
)
/*
sum
this
byte
*/
slwi
r6
,
r6
,
8
/*
this
byte
is
assumed
to
be
the
upper
byte
of
a
halfword
*/
adde
r5
,
r5
,
r6
6
:
addze
r5
,
r5
/*
add
in
final
carry
*/
rldicl
r4
,
r5
,
32
,
0
/*
fold
two
32
-
bit
halves
together
*/
add
r3
,
r4
,
r5
srdi
r3
,
r3
,
32
blr
/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively, and (for an error on
 * src) zeroes the rest of dst.
 *
 * This code needs to be reworked to take advantage of 64 bit sum+copy.
 * However, due to tokenring halfword alignment problems this will be very
 * tricky.  For now we'll leave it until we instrument it somehow.
 *
 * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
 */
_GLOBAL(csum_partial_copy_generic)
addic
r0
,
r6
,
0
subi
r3
,
r3
,
4
subi
r4
,
r4
,
4
srwi
.
r6
,
r5
,
2
beq
3
f
/*
if
we
're doing < 4 bytes */
andi
.
r9
,
r4
,
2
/*
Align
dst
to
longword
boundary
*/
beq
+
1
f
81
:
lhz
r6
,
4
(
r3
)
/*
do
2
bytes
to
get
aligned
*/
addi
r3
,
r3
,
2
subi
r5
,
r5
,
2
91
:
sth
r6
,
4
(
r4
)
addi
r4
,
r4
,
2
addc
r0
,
r0
,
r6
srwi
.
r6
,
r5
,
2
/*
#
words
to
do
*/
beq
3
f
1
:
mtctr
r6
82
:
lwzu
r6
,
4
(
r3
)
/*
the
bdnz
has
zero
overhead
,
so
it
should
*/
92
:
stwu
r6
,
4
(
r4
)
/*
be
unnecessary
to
unroll
this
loop
*/
adde
r0
,
r0
,
r6
bdnz
82
b
andi
.
r5
,
r5
,
3
3
:
cmpwi
0
,
r5
,
2
blt
+
4
f
83
:
lhz
r6
,
4
(
r3
)
addi
r3
,
r3
,
2
subi
r5
,
r5
,
2
93
:
sth
r6
,
4
(
r4
)
addi
r4
,
r4
,
2
adde
r0
,
r0
,
r6
4
:
cmpwi
0
,
r5
,
1
bne
+
5
f
84
:
lbz
r6
,
4
(
r3
)
94
:
stb
r6
,
4
(
r4
)
slwi
r6
,
r6
,
8
/*
Upper
byte
of
word
*/
adde
r0
,
r0
,
r6
5
:
addze
r3
,
r0
/*
add
in
final
carry
(
unlikely
with
64
-
bit
regs
)
*/
rldicl
r4
,
r3
,
32
,
0
/*
fold
64
bit
value
*/
add
r3
,
r4
,
r3
srdi
r3
,
r3
,
32
blr
/*
These
shouldn
't go in the fixup section, since that would
cause
the
ex_table
addresses
to
get
out
of
order
.
*/
.
globl
src_error_1
src_error_1
:
li
r6
,
0
subi
r5
,
r5
,
2
95
:
sth
r6
,
4
(
r4
)
addi
r4
,
r4
,
2
srwi
.
r6
,
r5
,
2
beq
3
f
mtctr
r6
.
globl
src_error_2
src_error_2
:
li
r6
,
0
96
:
stwu
r6
,
4
(
r4
)
bdnz
96
b
3
:
andi
.
r5
,
r5
,
3
beq
src_error
.
globl
src_error_3
src_error_3
:
li
r6
,
0
mtctr
r5
addi
r4
,
r4
,
3
97
:
stbu
r6
,
1
(
r4
)
bdnz
97
b
.
globl
src_error
src_error
:
cmpdi
0
,
r7
,
0
beq
1
f
li
r6
,-
EFAULT
stw
r6
,
0
(
r7
)
1
:
addze
r3
,
r0
blr
.
globl
dst_error
dst_error
:
cmpdi
0
,
r8
,
0
beq
1
f
li
r6
,-
EFAULT
stw
r6
,
0
(
r8
)
1
:
addze
r3
,
r0
blr
.
section
__ex_table
,"
a
"
.
align
3
.
llong
81
b
,
src_error_1
.
llong
91
b
,
dst_error
.
llong
82
b
,
src_error_2
.
llong
92
b
,
dst_error
.
llong
83
b
,
src_error_3
.
llong
93
b
,
dst_error
.
llong
84
b
,
src_error_3
.
llong
94
b
,
dst_error
.
llong
95
b
,
dst_error
.
llong
96
b
,
dst_error
.
llong
97
b
,
dst_error
arch/ppc64/lib/copypage.S deleted (100644 → 0)
/*
 * arch/ppc64/lib/copypage.S
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
_GLOBAL(copy_page)
std
r31
,-
8
(
1
)
std
r30
,-
16
(
1
)
std
r29
,-
24
(
1
)
std
r28
,-
32
(
1
)
std
r27
,-
40
(
1
)
std
r26
,-
48
(
1
)
std
r25
,-
56
(
1
)
std
r24
,-
64
(
1
)
std
r23
,-
72
(
1
)
std
r22
,-
80
(
1
)
std
r21
,-
88
(
1
)
std
r20
,-
96
(
1
)
li
r5
,
4096
/
32
-
1
addi
r3
,
r3
,-
8
li
r12
,
5
0
:
addi
r5
,
r5
,-
24
mtctr
r12
ld
r22
,
640
(
4
)
ld
r21
,
512
(
4
)
ld
r20
,
384
(
4
)
ld
r11
,
256
(
4
)
ld
r9
,
128
(
4
)
ld
r7
,
0
(
4
)
ld
r25
,
648
(
4
)
ld
r24
,
520
(
4
)
ld
r23
,
392
(
4
)
ld
r10
,
264
(
4
)
ld
r8
,
136
(
4
)
ldu
r6
,
8
(
4
)
cmpwi
r5
,
24
1
:
std
r22
,
648
(
3
)
std
r21
,
520
(
3
)
std
r20
,
392
(
3
)
std
r11
,
264
(
3
)
std
r9
,
136
(
3
)
std
r7
,
8
(
3
)
ld
r28
,
648
(
4
)
ld
r27
,
520
(
4
)
ld
r26
,
392
(
4
)
ld
r31
,
264
(
4
)
ld
r30
,
136
(
4
)
ld
r29
,
8
(
4
)
std
r25
,
656
(
3
)
std
r24
,
528
(
3
)
std
r23
,
400
(
3
)
std
r10
,
272
(
3
)
std
r8
,
144
(
3
)
std
r6
,
16
(
3
)
ld
r22
,
656
(
4
)
ld
r21
,
528
(
4
)
ld
r20
,
400
(
4
)
ld
r11
,
272
(
4
)
ld
r9
,
144
(
4
)
ld
r7
,
16
(
4
)
std
r28
,
664
(
3
)
std
r27
,
536
(
3
)
std
r26
,
408
(
3
)
std
r31
,
280
(
3
)
std
r30
,
152
(
3
)
stdu
r29
,
24
(
3
)
ld
r25
,
664
(
4
)
ld
r24
,
536
(
4
)
ld
r23
,
408
(
4
)
ld
r10
,
280
(
4
)
ld
r8
,
152
(
4
)
ldu
r6
,
24
(
4
)
bdnz
1
b
std
r22
,
648
(
3
)
std
r21
,
520
(
3
)
std
r20
,
392
(
3
)
std
r11
,
264
(
3
)
std
r9
,
136
(
3
)
std
r7
,
8
(
3
)
addi
r4
,
r4
,
640
addi
r3
,
r3
,
648
bge
0
b
mtctr
r5
ld
r7
,
0
(
4
)
ld
r8
,
8
(
4
)
ldu
r9
,
16
(
4
)
3
:
ld
r10
,
8
(
4
)
std
r7
,
8
(
3
)
ld
r7
,
16
(
4
)
std
r8
,
16
(
3
)
ld
r8
,
24
(
4
)
std
r9
,
24
(
3
)
ldu
r9
,
32
(
4
)
stdu
r10
,
32
(
3
)
bdnz
3
b
4
:
ld
r10
,
8
(
4
)
std
r7
,
8
(
3
)
std
r8
,
16
(
3
)
std
r9
,
24
(
3
)
std
r10
,
32
(
3
)
9
:
ld
r20
,-
96
(
1
)
ld
r21
,-
88
(
1
)
ld
r22
,-
80
(
1
)
ld
r23
,-
72
(
1
)
ld
r24
,-
64
(
1
)
ld
r25
,-
56
(
1
)
ld
r26
,-
48
(
1
)
ld
r27
,-
40
(
1
)
ld
r28
,-
32
(
1
)
ld
r29
,-
24
(
1
)
ld
r30
,-
16
(
1
)
ld
r31
,-
8
(
1
)
blr
arch/ppc64/lib/copyuser.S deleted (100644 → 0)
/*
 * arch/ppc64/lib/copyuser.S
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
	.align	7
_GLOBAL(__copy_tofrom_user)
/
*
first
check
for
a
whole
page
copy
on
a
page
boundary
*/
cmpldi
cr1
,
r5
,
16
cmpdi
cr6
,
r5
,
4096
or
r0
,
r3
,
r4
neg
r6
,
r3
/*
LS
3
bits
=
#
bytes
to
8
-
byte
dest
bdry
*/
andi
.
r0
,
r0
,
4095
std
r3
,-
24
(
r1
)
crand
cr0
*
4
+
2
,
cr0
*
4
+
2
,
cr6
*
4
+
2
std
r4
,-
16
(
r1
)
std
r5
,-
8
(
r1
)
dcbt
0
,
r4
beq
.
Lcopy_page
andi
.
r6
,
r6
,
7
mtcrf
0x01
,
r5
blt
cr1
,
.
Lshort_copy
bne
.
Ldst_unaligned
.
Ldst_aligned
:
andi
.
r0
,
r4
,
7
addi
r3
,
r3
,-
16
bne
.
Lsrc_unaligned
srdi
r7
,
r5
,
4
20
:
ld
r9
,
0
(
r4
)
addi
r4
,
r4
,-
8
mtctr
r7
andi
.
r5
,
r5
,
7
bf
cr7
*
4
+
0
,
22
f
addi
r3
,
r3
,
8
addi
r4
,
r4
,
8
mr
r8
,
r9
blt
cr1
,
72
f
21
:
ld
r9
,
8
(
r4
)
70
:
std
r8
,
8
(
r3
)
22
:
ldu
r8
,
16
(
r4
)
71
:
stdu
r9
,
16
(
r3
)
bdnz
21
b
72
:
std
r8
,
8
(
r3
)
beq
+
3
f
addi
r3
,
r3
,
16
23
:
ld
r9
,
8
(
r4
)
.
Ldo_tail
:
bf
cr7
*
4
+
1
,
1
f
rotldi
r9
,
r9
,
32
73
:
stw
r9
,
0
(
r3
)
addi
r3
,
r3
,
4
1
:
bf
cr7
*
4
+
2
,
2
f
rotldi
r9
,
r9
,
16
74
:
sth
r9
,
0
(
r3
)
addi
r3
,
r3
,
2
2
:
bf
cr7
*
4
+
3
,
3
f
rotldi
r9
,
r9
,
8
75
:
stb
r9
,
0
(
r3
)
3
:
li
r3
,
0
blr
.
Lsrc_unaligned
:
srdi
r6
,
r5
,
3
addi
r5
,
r5
,-
16
subf
r4
,
r0
,
r4
srdi
r7
,
r5
,
4
sldi
r10
,
r0
,
3
cmpldi
cr6
,
r6
,
3
andi
.
r5
,
r5
,
7
mtctr
r7
subfic
r11
,
r10
,
64
add
r5
,
r5
,
r0
bt
cr7
*
4
+
0
,
28
f
24
:
ld
r9
,
0
(
r4
)
/*
3
+
2
n
loads
,
2
+
2
n
stores
*/
25
:
ld
r0
,
8
(
r4
)
sld
r6
,
r9
,
r10
26
:
ldu
r9
,
16
(
r4
)
srd
r7
,
r0
,
r11
sld
r8
,
r0
,
r10
or
r7
,
r7
,
r6
blt
cr6
,
79
f
27
:
ld
r0
,
8
(
r4
)
b
2
f
28
:
ld
r0
,
0
(
r4
)
/*
4
+
2
n
loads
,
3
+
2
n
stores
*/
29
:
ldu
r9
,
8
(
r4
)
sld
r8
,
r0
,
r10
addi
r3
,
r3
,-
8
blt
cr6
,
5
f
30
:
ld
r0
,
8
(
r4
)
srd
r12
,
r9
,
r11
sld
r6
,
r9
,
r10
31
:
ldu
r9
,
16
(
r4
)
or
r12
,
r8
,
r12
srd
r7
,
r0
,
r11
sld
r8
,
r0
,
r10
addi
r3
,
r3
,
16
beq
cr6
,
78
f
1
:
or
r7
,
r7
,
r6
32
:
ld
r0
,
8
(
r4
)
76
:
std
r12
,
8
(
r3
)
2
:
srd
r12
,
r9
,
r11
sld
r6
,
r9
,
r10
33
:
ldu
r9
,
16
(
r4
)
or
r12
,
r8
,
r12
77
:
stdu
r7
,
16
(
r3
)
srd
r7
,
r0
,
r11
sld
r8
,
r0
,
r10
bdnz
1
b
78
:
std
r12
,
8
(
r3
)
or
r7
,
r7
,
r6
79
:
std
r7
,
16
(
r3
)
5
:
srd
r12
,
r9
,
r11
or
r12
,
r8
,
r12
80
:
std
r12
,
24
(
r3
)
bne
6
f
li
r3
,
0
blr
6
:
cmpwi
cr1
,
r5
,
8
addi
r3
,
r3
,
32
sld
r9
,
r9
,
r10
ble
cr1
,
.
Ldo_tail
34
:
ld
r0
,
8
(
r4
)
srd
r7
,
r0
,
r11
or
r9
,
r7
,
r9
b
.
Ldo_tail
.
Ldst_unaligned
:
mtcrf
0x01
,
r6
/*
put
#
bytes
to
8
B
bdry
into
cr7
*/
subf
r5
,
r6
,
r5
li
r7
,
0
cmpldi
r1
,
r5
,
16
bf
cr7
*
4
+
3
,
1
f
35
:
lbz
r0
,
0
(
r4
)
81
:
stb
r0
,
0
(
r3
)
addi
r7
,
r7
,
1
1
:
bf
cr7
*
4
+
2
,
2
f
36
:
lhzx
r0
,
r7
,
r4
82
:
sthx
r0
,
r7
,
r3
addi
r7
,
r7
,
2
2
:
bf
cr7
*
4
+
1
,
3
f
37
:
lwzx
r0
,
r7
,
r4
83
:
stwx
r0
,
r7
,
r3
3
:
mtcrf
0x01
,
r5
add
r4
,
r6
,
r4
add
r3
,
r6
,
r3
b
.
Ldst_aligned
.
Lshort_copy
:
bf
cr7
*
4
+
0
,
1
f
38
:
lwz
r0
,
0
(
r4
)
39
:
lwz
r9
,
4
(
r4
)
addi
r4
,
r4
,
8
84
:
stw
r0
,
0
(
r3
)
85
:
stw
r9
,
4
(
r3
)
addi
r3
,
r3
,
8
1
:
bf
cr7
*
4
+
1
,
2
f
40
:
lwz
r0
,
0
(
r4
)
addi
r4
,
r4
,
4
86
:
stw
r0
,
0
(
r3
)
addi
r3
,
r3
,
4
2
:
bf
cr7
*
4
+
2
,
3
f
41
:
lhz
r0
,
0
(
r4
)
addi
r4
,
r4
,
2
87
:
sth
r0
,
0
(
r3
)
addi
r3
,
r3
,
2
3
:
bf
cr7
*
4
+
3
,
4
f
42
:
lbz
r0
,
0
(
r4
)
88
:
stb
r0
,
0
(
r3
)
4
:
li
r3
,
0
blr
/*
 * exception handlers follow
 * we have to return the number of bytes not copied
 * for an exception on a load, we set the rest of the destination to 0
 */
136
:
137
:
add
r3
,
r3
,
r7
b
1
f
130
:
131
:
addi
r3
,
r3
,
8
120
:
122
:
124
:
125
:
126
:
127
:
128
:
129
:
133
:
addi
r3
,
r3
,
8
121
:
132
:
addi
r3
,
r3
,
8
123
:
134
:
135
:
138
:
139
:
140
:
141
:
142
:
/*
*
here
we
have
had
a
fault
on
a
load
and
r3
points
to
the
first
*
unmodified
byte
of
the
destination
*/
1
:
ld
r6
,-
24
(
r1
)
ld
r4
,-
16
(
r1
)
ld
r5
,-
8
(
r1
)
subf
r6
,
r6
,
r3
add
r4
,
r4
,
r6
subf
r5
,
r6
,
r5
/*
#
bytes
left
to
go
*/
/*
*
first
see
if
we
can
copy
any
more
bytes
before
hitting
another
exception
*/
mtctr
r5
43
:
lbz
r0
,
0
(
r4
)
addi
r4
,
r4
,
1
89
:
stb
r0
,
0
(
r3
)
addi
r3
,
r3
,
1
bdnz
43
b
li
r3
,
0
/*
huh
?
all
copied
successfully
this
time
?
*/
blr
/*
*
here
we
have
trapped
again
,
need
to
clear
ctr
bytes
starting
at
r3
*/
143
:
mfctr
r5
li
r0
,
0
mr
r4
,
r3
mr
r3
,
r5
/*
return
the
number
of
bytes
not
copied
*/
1
:
andi
.
r9
,
r4
,
7
beq
3
f
90
:
stb
r0
,
0
(
r4
)
addic
.
r5
,
r5
,-
1
addi
r4
,
r4
,
1
bne
1
b
blr
3
:
cmpldi
cr1
,
r5
,
8
srdi
r9
,
r5
,
3
andi
.
r5
,
r5
,
7
blt
cr1
,
93
f
mtctr
r9
91
:
std
r0
,
0
(
r4
)
addi
r4
,
r4
,
8
bdnz
91
b
93
:
beqlr
mtctr
r5
92
:
stb
r0
,
0
(
r4
)
addi
r4
,
r4
,
1
bdnz
92
b
blr
/*
*
exception
handlers
for
stores
:
we
just
need
to
work
*
out
how
many
bytes
weren
't copied
*/
182
:
183
:
add
r3
,
r3
,
r7
b
1
f
180
:
addi
r3
,
r3
,
8
171
:
177
:
addi
r3
,
r3
,
8
170
:
172
:
176
:
178
:
addi
r3
,
r3
,
4
185
:
addi
r3
,
r3
,
4
173
:
174
:
175
:
179
:
181
:
184
:
186
:
187
:
188
:
189
:
1
:
ld
r6
,-
24
(
r1
)
ld
r5
,-
8
(
r1
)
add
r6
,
r6
,
r5
subf
r3
,
r3
,
r6
/*
#
bytes
not
copied
*/
190
:
191
:
192
:
blr
/*
#
bytes
not
copied
in
r3
*/
.
section
__ex_table
,
"a"
.
align
3
.
llong
20
b
,
120
b
.
llong
21
b
,
121
b
.
llong
70
b
,
170
b
.
llong
22
b
,
122
b
.
llong
71
b
,
171
b
.
llong
72
b
,
172
b
.
llong
23
b
,
123
b
.
llong
73
b
,
173
b
.
llong
74
b
,
174
b
.
llong
75
b
,
175
b
.
llong
24
b
,
124
b
.
llong
25
b
,
125
b
.
llong
26
b
,
126
b
.
llong
27
b
,
127
b
.
llong
28
b
,
128
b
.
llong
29
b
,
129
b
.
llong
30
b
,
130
b
.
llong
31
b
,
131
b
.
llong
32
b
,
132
b
.
llong
76
b
,
176
b
.
llong
33
b
,
133
b
.
llong
77
b
,
177
b
.
llong
78
b
,
178
b
.
llong
79
b
,
179
b
.
llong
80
b
,
180
b
.
llong
34
b
,
134
b
.
llong
35
b
,
135
b
.
llong
81
b
,
181
b
.
llong
36
b
,
136
b
.
llong
82
b
,
182
b
.
llong
37
b
,
137
b
.
llong
83
b
,
183
b
.
llong
38
b
,
138
b
.
llong
39
b
,
139
b
.
llong
84
b
,
184
b
.
llong
85
b
,
185
b
.
llong
40
b
,
140
b
.
llong
86
b
,
186
b
.
llong
41
b
,
141
b
.
llong
87
b
,
187
b
.
llong
42
b
,
142
b
.
llong
88
b
,
188
b
.
llong
43
b
,
143
b
.
llong
89
b
,
189
b
.
llong
90
b
,
190
b
.
llong
91
b
,
191
b
.
llong
92
b
,
192
b
.
text
/*
*
Routine
to
copy
a
whole
page
of
data
,
optimized
for
POWER4
.
*
On
POWER4
it
is
more
than
50
%
faster
than
the
simple
loop
*
above
(
following
the
.
Ldst_aligned
label
)
but
it
runs
slightly
*
slower
on
POWER3
.
*/
.
Lcopy_page
:
std
r31
,-
32
(
1
)
std
r30
,-
40
(
1
)
std
r29
,-
48
(
1
)
std
r28
,-
56
(
1
)
std
r27
,-
64
(
1
)
std
r26
,-
72
(
1
)
std
r25
,-
80
(
1
)
std
r24
,-
88
(
1
)
std
r23
,-
96
(
1
)
std
r22
,-
104
(
1
)
std
r21
,-
112
(
1
)
std
r20
,-
120
(
1
)
li
r5
,
4096
/
32
-
1
addi
r3
,
r3
,-
8
li
r0
,
5
0
:
addi
r5
,
r5
,-
24
mtctr
r0
20
:
ld
r22
,
640
(
4
)
21
:
ld
r21
,
512
(
4
)
22
:
ld
r20
,
384
(
4
)
23
:
ld
r11
,
256
(
4
)
24
:
ld
r9
,
128
(
4
)
25
:
ld
r7
,
0
(
4
)
26
:
ld
r25
,
648
(
4
)
27
:
ld
r24
,
520
(
4
)
28
:
ld
r23
,
392
(
4
)
29
:
ld
r10
,
264
(
4
)
30
:
ld
r8
,
136
(
4
)
31
:
ldu
r6
,
8
(
4
)
cmpwi
r5
,
24
1
:
32
:
std
r22
,
648
(
3
)
33
:
std
r21
,
520
(
3
)
34
:
std
r20
,
392
(
3
)
35
:
std
r11
,
264
(
3
)
36
:
std
r9
,
136
(
3
)
37
:
std
r7
,
8
(
3
)
38
:
ld
r28
,
648
(
4
)
39
:
ld
r27
,
520
(
4
)
40
:
ld
r26
,
392
(
4
)
41
:
ld
r31
,
264
(
4
)
42
:
ld
r30
,
136
(
4
)
43
:
ld
r29
,
8
(
4
)
44
:
std
r25
,
656
(
3
)
45
:
std
r24
,
528
(
3
)
46
:
std
r23
,
400
(
3
)
47
:
std
r10
,
272
(
3
)
48
:
std
r8
,
144
(
3
)
49
:
std
r6
,
16
(
3
)
50
:
ld
r22
,
656
(
4
)
51
:
ld
r21
,
528
(
4
)
52
:
ld
r20
,
400
(
4
)
53
:
ld
r11
,
272
(
4
)
54
:
ld
r9
,
144
(
4
)
55
:
ld
r7
,
16
(
4
)
56
:
std
r28
,
664
(
3
)
57
:
std
r27
,
536
(
3
)
58
:
std
r26
,
408
(
3
)
59
:
std
r31
,
280
(
3
)
60
:
std
r30
,
152
(
3
)
61
:
stdu
r29
,
24
(
3
)
62
:
ld
r25
,
664
(
4
)
63
:
ld
r24
,
536
(
4
)
64
:
ld
r23
,
408
(
4
)
65
:
ld
r10
,
280
(
4
)
66
:
ld
r8
,
152
(
4
)
67
:
ldu
r6
,
24
(
4
)
bdnz
1
b
68
:
std
r22
,
648
(
3
)
69
:
std
r21
,
520
(
3
)
70
:
std
r20
,
392
(
3
)
71
:
std
r11
,
264
(
3
)
72
:
std
r9
,
136
(
3
)
73
:
std
r7
,
8
(
3
)
74
:
addi
r4
,
r4
,
640
75
:
addi
r3
,
r3
,
648
bge
0
b
mtctr
r5
76
:
ld
r7
,
0
(
4
)
77
:
ld
r8
,
8
(
4
)
78
:
ldu
r9
,
16
(
4
)
3
:
79
:
ld
r10
,
8
(
4
)
80
:
std
r7
,
8
(
3
)
81
:
ld
r7
,
16
(
4
)
82
:
std
r8
,
16
(
3
)
83
:
ld
r8
,
24
(
4
)
84
:
std
r9
,
24
(
3
)
85
:
ldu
r9
,
32
(
4
)
86
:
stdu
r10
,
32
(
3
)
bdnz
3
b
4
:
87
:
ld
r10
,
8
(
4
)
88
:
std
r7
,
8
(
3
)
89
:
std
r8
,
16
(
3
)
90
:
std
r9
,
24
(
3
)
91
:
std
r10
,
32
(
3
)
9
:
ld
r20
,-
120
(
1
)
ld
r21
,-
112
(
1
)
ld
r22
,-
104
(
1
)
ld
r23
,-
96
(
1
)
ld
r24
,-
88
(
1
)
ld
r25
,-
80
(
1
)
ld
r26
,-
72
(
1
)
ld
r27
,-
64
(
1
)
ld
r28
,-
56
(
1
)
ld
r29
,-
48
(
1
)
ld
r30
,-
40
(
1
)
ld
r31
,-
32
(
1
)
li
r3
,
0
blr
/*
*
on
an
exception
,
reset
to
the
beginning
and
jump
back
into
the
*
standard
__copy_tofrom_user
*/
100
:
ld
r20
,-
120
(
1
)
ld
r21
,-
112
(
1
)
ld
r22
,-
104
(
1
)
ld
r23
,-
96
(
1
)
ld
r24
,-
88
(
1
)
ld
r25
,-
80
(
1
)
ld
r26
,-
72
(
1
)
ld
r27
,-
64
(
1
)
ld
r28
,-
56
(
1
)
ld
r29
,-
48
(
1
)
ld
r30
,-
40
(
1
)
ld
r31
,-
32
(
1
)
ld
r3
,-
24
(
r1
)
ld
r4
,-
16
(
r1
)
li
r5
,
4096
b
.
Ldst_aligned
.
section
__ex_table
,
"a"
.
align
3
.
llong
20
b
,
100
b
.
llong
21
b
,
100
b
.
llong
22
b
,
100
b
.
llong
23
b
,
100
b
.
llong
24
b
,
100
b
.
llong
25
b
,
100
b
.
llong
26
b
,
100
b
.
llong
27
b
,
100
b
.
llong
28
b
,
100
b
.
llong
29
b
,
100
b
.
llong
30
b
,
100
b
.
llong
31
b
,
100
b
.
llong
32
b
,
100
b
.
llong
33
b
,
100
b
.
llong
34
b
,
100
b
.
llong
35
b
,
100
b
.
llong
36
b
,
100
b
.
llong
37
b
,
100
b
.
llong
38
b
,
100
b
.
llong
39
b
,
100
b
.
llong
40
b
,
100
b
.
llong
41
b
,
100
b
.
llong
42
b
,
100
b
.
llong
43
b
,
100
b
.
llong
44
b
,
100
b
.
llong
45
b
,
100
b
.
llong
46
b
,
100
b
.
llong
47
b
,
100
b
.
llong
48
b
,
100
b
.
llong
49
b
,
100
b
.
llong
50
b
,
100
b
.
llong
51
b
,
100
b
.
llong
52
b
,
100
b
.
llong
53
b
,
100
b
.
llong
54
b
,
100
b
.
llong
55
b
,
100
b
.
llong
56
b
,
100
b
.
llong
57
b
,
100
b
.
llong
58
b
,
100
b
.
llong
59
b
,
100
b
.
llong
60
b
,
100
b
.
llong
61
b
,
100
b
.
llong
62
b
,
100
b
.
llong
63
b
,
100
b
.
llong
64
b
,
100
b
.
llong
65
b
,
100
b
.
llong
66
b
,
100
b
.
llong
67
b
,
100
b
.
llong
68
b
,
100
b
.
llong
69
b
,
100
b
.
llong
70
b
,
100
b
.
llong
71
b
,
100
b
.
llong
72
b
,
100
b
.
llong
73
b
,
100
b
.
llong
74
b
,
100
b
.
llong
75
b
,
100
b
.
llong
76
b
,
100
b
.
llong
77
b
,
100
b
.
llong
78
b
,
100
b
.
llong
79
b
,
100
b
.
llong
80
b
,
100
b
.
llong
81
b
,
100
b
.
llong
82
b
,
100
b
.
llong
83
b
,
100
b
.
llong
84
b
,
100
b
.
llong
85
b
,
100
b
.
llong
86
b
,
100
b
.
llong
87
b
,
100
b
.
llong
88
b
,
100
b
.
llong
89
b
,
100
b
.
llong
90
b
,
100
b
.
llong
91
b
,
100
b
arch/ppc64/lib/e2a.c  deleted 100644 → 0  View file @ 45424376

/*
 * arch/ppc64/lib/e2a.c
 *
 * EBCDIC to ASCII conversion
 *
 * This function moved here from arch/ppc64/kernel/viopath.c
 *
 * (C) Copyright 2000-2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/module.h>

unsigned char e2a(unsigned char x)
{
	switch (x) {
	case 0xF0: return '0';
	case 0xF1: return '1';
	case 0xF2: return '2';
	case 0xF3: return '3';
	case 0xF4: return '4';
	case 0xF5: return '5';
	case 0xF6: return '6';
	case 0xF7: return '7';
	case 0xF8: return '8';
	case 0xF9: return '9';
	case 0xC1: return 'A';
	case 0xC2: return 'B';
	case 0xC3: return 'C';
	case 0xC4: return 'D';
	case 0xC5: return 'E';
	case 0xC6: return 'F';
	case 0xC7: return 'G';
	case 0xC8: return 'H';
	case 0xC9: return 'I';
	case 0xD1: return 'J';
	case 0xD2: return 'K';
	case 0xD3: return 'L';
	case 0xD4: return 'M';
	case 0xD5: return 'N';
	case 0xD6: return 'O';
	case 0xD7: return 'P';
	case 0xD8: return 'Q';
	case 0xD9: return 'R';
	case 0xE2: return 'S';
	case 0xE3: return 'T';
	case 0xE4: return 'U';
	case 0xE5: return 'V';
	case 0xE6: return 'W';
	case 0xE7: return 'X';
	case 0xE8: return 'Y';
	case 0xE9: return 'Z';
	}
	return ' ';
}
EXPORT_SYMBOL(e2a);
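
For reference only (not part of this commit): the EBCDIC-to-ASCII mapping above can be exercised from user space with a small harness like the one below; it collapses the switch into range checks but covers exactly the same code points, and the "HELLO" test vector is just an illustration.

/* Illustrative user-space check of the e2a() mapping above; not kernel code. */
#include <stdio.h>

static unsigned char e2a_demo(unsigned char x)
{
	if (x >= 0xF0 && x <= 0xF9)	/* EBCDIC digits '0'..'9' */
		return '0' + (x - 0xF0);
	if (x >= 0xC1 && x <= 0xC9)	/* EBCDIC 'A'..'I' */
		return 'A' + (x - 0xC1);
	if (x >= 0xD1 && x <= 0xD9)	/* EBCDIC 'J'..'R' */
		return 'J' + (x - 0xD1);
	if (x >= 0xE2 && x <= 0xE9)	/* EBCDIC 'S'..'Z' */
		return 'S' + (x - 0xE2);
	return ' ';			/* everything else, as in e2a() */
}

int main(void)
{
	static const unsigned char hello[] = { 0xC8, 0xC5, 0xD3, 0xD3, 0xD6 };	/* "HELLO" */
	size_t i;

	for (i = 0; i < sizeof(hello); i++)
		putchar(e2a_demo(hello[i]));
	putchar('\n');
	return 0;
}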
arch/ppc64/lib/locks.c  deleted 100644 → 0  View file @ 45424376

/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <asm/hvcall.h>
#include <asm/iSeries/HvCall.h>

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)

void __spin_yield(raw_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;
	struct paca_struct *holder_paca;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	holder_paca = &paca[holder_cpu];
	yield_count = holder_paca->lppaca.yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (lock->slock != lock_value)
		return;		/* something has changed */
#ifdef CONFIG_PPC_ISERIES
	HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
		((u64)holder_cpu << 32) | yield_count);
#else
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
			   yield_count);
#endif
}

/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
void __rw_yield(raw_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;
	struct paca_struct *holder_paca;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	holder_paca = &paca[holder_cpu];
	yield_count = holder_paca->lppaca.yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
#ifdef CONFIG_PPC_ISERIES
	HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
		((u64)holder_cpu << 32) | yield_count);
#else
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
			   yield_count);
#endif
}
#endif

void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (lock->slock) {
		HMT_low();
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();
}

EXPORT_SYMBOL(__raw_spin_unlock_wait);
arch/ppc64/lib/memcpy.S  deleted 100644 → 0  View file @ 45424376

/*
 * arch/ppc64/lib/memcpy.S
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>

	.align	7
_GLOBAL(memcpy)
	mtcrf	0x01,r5
	cmpldi	cr1,r5,16
	neg	r6,r3		# LS 3 bits = # bytes to 8-byte dest bdry
	andi.	r6,r6,7
	dcbt	0,r4
	blt	cr1,.Lshort_copy
	bne	.Ldst_unaligned
.Ldst_aligned:
	andi.	r0,r4,7
	addi	r3,r3,-16
	bne	.Lsrc_unaligned
	srdi	r7,r5,4
	ld	r9,0(r4)
	addi	r4,r4,-8
	mtctr	r7
	andi.	r5,r5,7
	bf	cr7*4+0,2f
	addi	r3,r3,8
	addi	r4,r4,8
	mr	r8,r9
	blt	cr1,3f
1:	ld	r9,8(r4)
	std	r8,8(r3)
2:	ldu	r8,16(r4)
	stdu	r9,16(r3)
	bdnz	1b
3:	std	r8,8(r3)
	beqlr
	addi	r3,r3,16
	ld	r9,8(r4)
.Ldo_tail:
	bf	cr7*4+1,1f
	rotldi	r9,r9,32
	stw	r9,0(r3)
	addi	r3,r3,4
1:	bf	cr7*4+2,2f
	rotldi	r9,r9,16
	sth	r9,0(r3)
	addi	r3,r3,2
2:	bf	cr7*4+3,3f
	rotldi	r9,r9,8
	stb	r9,0(r3)
3:	blr

.Lsrc_unaligned:
	srdi	r6,r5,3
	addi	r5,r5,-16
	subf	r4,r0,r4
	srdi	r7,r5,4
	sldi	r10,r0,3
	cmpdi	cr6,r6,3
	andi.	r5,r5,7
	mtctr	r7
	subfic	r11,r10,64
	add	r5,r5,r0
	bt	cr7*4+0,0f

	ld	r9,0(r4)	# 3+2n loads, 2+2n stores
	ld	r0,8(r4)
	sld	r6,r9,r10
	ldu	r9,16(r4)
	srd	r7,r0,r11
	sld	r8,r0,r10
	or	r7,r7,r6
	blt	cr6,4f
	ld	r0,8(r4)	# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
	b	2f

0:	ld	r0,0(r4)	# 4+2n loads, 3+2n stores
	ldu	r9,8(r4)
	sld	r8,r0,r10
	addi	r3,r3,-8
	blt	cr6,5f
	ld	r0,8(r4)
	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	srd	r7,r0,r11
	sld	r8,r0,r10
	addi	r3,r3,16
	beq	cr6,3f		# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9

1:	or	r7,r7,r6
	ld	r0,8(r4)
	std	r12,8(r3)
2:	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	stdu	r7,16(r3)
	srd	r7,r0,r11
	sld	r8,r0,r10
	bdnz	1b

3:	std	r12,8(r3)
	or	r7,r7,r6
4:	std	r7,16(r3)
5:	srd	r12,r9,r11
	or	r12,r8,r12
	std	r12,24(r3)
	beqlr
	cmpwi	cr1,r5,8
	addi	r3,r3,32
	sld	r9,r9,r10
	ble	cr1,.Ldo_tail
	ld	r0,8(r4)
	srd	r7,r0,r11
	or	r9,r7,r9
	b	.Ldo_tail

.Ldst_unaligned:
	mtcrf	0x01,r6		# put # bytes to 8B bdry into cr7
	subf	r5,r6,r5
	li	r7,0
	cmpldi	r1,r5,16
	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	stb	r0,0(r3)
	addi	r7,r7,1
1:	bf	cr7*4+2,2f
	lhzx	r0,r7,r4
	sthx	r0,r7,r3
	addi	r7,r7,2
2:	bf	cr7*4+1,3f
	lwzx	r0,r7,r4
	stwx	r0,r7,r3
3:	mtcrf	0x01,r5
	add	r4,r6,r4
	add	r3,r6,r3
	b	.Ldst_aligned

.Lshort_copy:
	bf	cr7*4+0,1f
	lwz	r0,0(r4)
	lwz	r9,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r9,4(r3)
	addi	r3,r3,8
1:	bf	cr7*4+1,2f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4
2:	bf	cr7*4+2,3f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2
3:	bf	cr7*4+3,4f
	lbz	r0,0(r4)
	stb	r0,0(r3)
4:	blr
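
A small aside (not part of this commit): the memcpy prologue above computes how many bytes are needed to reach the next 8-byte destination boundary with neg followed by andi. The same arithmetic in C is (-dst) & 7, which the sketch below prints for a few sample addresses.

/* User-space sketch of the "neg r6,r3; andi. r6,r6,7" alignment calculation. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t dst;

	for (dst = 0x1000; dst < 0x1008; dst++) {
		unsigned long to_bdry = (unsigned long)(-dst) & 7;	/* bytes to 8-byte bdry */
		printf("dst=%#lx -> %lu byte(s) to the next 8-byte boundary\n",
		       (unsigned long)dst, to_bdry);
	}
	return 0;
}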
arch/ppc64/lib/sstep.c  deleted 100644 → 0  View file @ 45424376

/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <asm/sstep.h>
#include <asm/processor.h>

extern char system_call_common[];

/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffff

/*
 * Determine whether a conditional branch instruction would branch.
 */
static int branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}

/*
 * Emulate instructions that cause a transfer of control.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int emulate_step(struct pt_regs *regs, unsigned int instr)
{
	unsigned int opcode, rd;
	unsigned long int imm;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		if ((regs->msr & MSR_SF) == 0)
			regs->nip &= 0xffffffffUL;
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = imm;
		return 1;
	case 17:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;
	case 18:	/* b */
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1) {
			regs->link = regs->nip + 4;
			if ((regs->msr & MSR_SF) == 0)
				regs->link &= 0xffffffffUL;
		}
		if ((regs->msr & MSR_SF) == 0)
			imm &= 0xffffffffUL;
		regs->nip = imm;
		return 1;
	case 19:
		switch (instr & 0x7fe) {
		case 0x20:	/* bclr */
		case 0x420:	/* bcctr */
			imm = (instr & 0x400) ? regs->ctr : regs->link;
			regs->nip += 4;
			if ((regs->msr & MSR_SF) == 0) {
				regs->nip &= 0xffffffffUL;
				imm &= 0xffffffffUL;
			}
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;
		case 0x24:	/* rfid, scary */
			return -1;
		}
	case 31:
		rd = (instr >> 21) & 0x1f;
		switch (instr & 0x7fe) {
		case 0xa6:	/* mfmsr */
			regs->gpr[rd] = regs->msr & MSR_MASK;
			regs->nip += 4;
			if ((regs->msr & MSR_SF) == 0)
				regs->nip &= 0xffffffffUL;
			return 1;
		case 0x164:	/* mtmsrd */
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV and MSR_ME */
			imm = (instr & 0x10000) ? 0x8002 : 0xefffffffffffefffUL;
			imm = (regs->msr & MSR_MASK & ~imm)
				| (regs->gpr[rd] & imm);
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsrd that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			regs->nip += 4;
			if ((imm & MSR_SF) == 0)
				regs->nip &= 0xffffffffUL;
			return 1;
		}
	}
	return 0;
}
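
As an illustration only (not from this commit), the field extraction that branch_taken() and emulate_step() perform can be tried stand-alone. The encoding below is assumed for the example: a "bc" (primary opcode 16) with BO=4 and BI=30, i.e. a bne testing cr7, chosen simply to show the shifts and masks.

/* Stand-alone sketch of the instruction-field decode used above. */
#include <stdio.h>

int main(void)
{
	unsigned int instr = (16u << 26) | (4u << 21) | (30u << 16) | 0x0008;
	unsigned int opcode = instr >> 26;		/* primary opcode, 16 = bc */
	unsigned int bo = (instr >> 21) & 0x1f;		/* branch options field */
	unsigned int bi = (instr >> 16) & 0x1f;		/* CR bit to test */
	int disp = (signed short)(instr & 0xfffc);	/* sign-extended displacement */

	printf("opcode=%u bo=%u bi=%u disp=%d\n", opcode, bo, bi, disp);
	return 0;
}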
arch/ppc64/lib/strcase.c  deleted 100644 → 0  View file @ 45424376

/*
 * c 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/ctype.h>

int strcasecmp(const char *s1, const char *s2)
{
	int c1, c2;

	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while (c1 == c2 && c1 != 0);
	return c1 - c2;
}

int strncasecmp(const char *s1, const char *s2, int n)
{
	int c1, c2;

	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while ((--n > 0) && c1 == c2 && c1 != 0);
	return c1 - c2;
}
arch/ppc64/lib/string.S  View file @ 22b28032

...
@@ -65,112 +65,6 @@ _GLOBAL(strlen)
	subf	r3,r3,r4
	blr

_GLOBAL(memset)
	neg	r0,r3
	rlwimi	r4,r4,8,16,23
	andi.	r0,r0,7		/* # bytes to be 8-byte aligned */
	rlwimi	r4,r4,16,0,15
	cmplw	cr1,r5,r0	/* do we get that far? */
	rldimi	r4,r4,32,0
	mtcrf	1,r0
	mr	r6,r3
	blt	cr1,8f
	beq+	3f		/* if already 8-byte aligned */
	subf	r5,r0,r5
	bf	31,1f
	stb	r4,0(r6)
	addi	r6,r6,1
1:	bf	30,2f
	sth	r4,0(r6)
	addi	r6,r6,2
2:	bf	29,3f
	stw	r4,0(r6)
	addi	r6,r6,4
3:	srdi.	r0,r5,6
	clrldi	r5,r5,58
	mtctr	r0
	beq	5f
4:	std	r4,0(r6)
	std	r4,8(r6)
	std	r4,16(r6)
	std	r4,24(r6)
	std	r4,32(r6)
	std	r4,40(r6)
	std	r4,48(r6)
	std	r4,56(r6)
	addi	r6,r6,64
	bdnz	4b
5:	srwi.	r0,r5,3
	clrlwi	r5,r5,29
	mtcrf	1,r0
	beq	8f
	bf	29,6f
	std	r4,0(r6)
	std	r4,8(r6)
	std	r4,16(r6)
	std	r4,24(r6)
	addi	r6,r6,32
6:	bf	30,7f
	std	r4,0(r6)
	std	r4,8(r6)
	addi	r6,r6,16
7:	bf	31,8f
	std	r4,0(r6)
	addi	r6,r6,8
8:	cmpwi	r5,0
	mtcrf	1,r5
	beqlr+
	bf	29,9f
	stw	r4,0(r6)
	addi	r6,r6,4
9:	bf	30,10f
	sth	r4,0(r6)
	addi	r6,r6,2
10:	bflr	31
	stb	r4,0(r6)
	blr

_GLOBAL(memmove)
	cmplw	0,r3,r4
	bgt	.backwards_memcpy
	b	.memcpy

_GLOBAL(backwards_memcpy)
	rlwinm.	r7,r5,32-3,3,31		/* r0 = r5 >> 3 */
	add	r6,r3,r5
	add	r4,r4,r5
	beq	2f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b

_GLOBAL(memcmp)
	cmpwi	0,r5,0
	ble-	2f
...
arch/ppc64/lib/usercopy.c  deleted 100644 → 0  View file @ 45424376

/*
 * Functions which are too large to be inlined.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <asm/uaccess.h>

unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_READ, from, n)))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}

unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_WRITE, to, n)))
		n = __copy_to_user(to, from, n);
	return n;
}

unsigned long copy_in_user(void __user *to, const void __user *from,
			   unsigned long n)
{
	might_sleep();
	if (likely(access_ok(VERIFY_READ, from, n) &&
		   access_ok(VERIFY_WRITE, to, n)))
		n = __copy_tofrom_user(to, from, n);
	return n;
}

EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(copy_in_user);
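
For context (not part of this commit): the exported wrappers above are normally called from driver code roughly as in the hedged sketch below. demo_buf and demo_write are made-up names for illustration, and on a tree of this era the declarations come from <asm/uaccess.h> as included above.

/* Hypothetical driver write() method showing the usual copy_from_user() pattern. */
#include <linux/fs.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

static char demo_buf[64];

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	if (count > sizeof(demo_buf))
		count = sizeof(demo_buf);
	/* copy_from_user() returns the number of bytes it could NOT copy */
	if (copy_from_user(demo_buf, ubuf, count))
		return -EFAULT;
	return count;
}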