nexedi/linux commit ab7c828b
Authored Jun 08, 2003 by Linus Torvalds

    Merge bk://are.twiddle.net/axp-2.5
    into home.transmeta.com:/home/torvalds/v2.5/linux

Parents: 6c94e37b 4964368c

Showing 11 changed files with 87 additions and 97 deletions (+87 -97)
    arch/alpha/kernel/asm-offsets.c      +1   -0
    arch/alpha/kernel/head.S             +2   -1
    arch/alpha/kernel/systbls.S          +1   -0
    arch/alpha/kernel/traps.c            +40  -40
    arch/alpha/lib/csum_partial_copy.c   +2   -2
    arch/alpha/lib/memmove.S             +9   -0
    include/asm-alpha/ptrace.h           +2   -9
    include/asm-alpha/string.h           +1   -0
    include/asm-alpha/uaccess.h          +26  -43
    include/asm-alpha/unaligned.h        +1   -1
    include/asm-alpha/unistd.h           +2   -1
arch/alpha/kernel/asm-offsets.c

@@ -31,6 +31,7 @@ void foo(void)
 	DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
 	BLANK();
 
+	DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
 	DEFINE(PT_PTRACED, PT_PTRACED);
 	DEFINE(CLONE_VM, CLONE_VM);
 	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
arch/alpha/kernel/head.S

@@ -9,6 +9,7 @@
 #include <linux/config.h>
 #include <asm/system.h>
+#include <asm/asm_offsets.h>
 
 .globl swapper_pg_dir
 .globl _stext
@@ -25,7 +26,7 @@ __start:
 	/* We need to get current_task_info loaded up...  */
 	lda	$8,init_thread_union
 	/* ... and find our stack ... */
-	lda	$30,0x4000($8)
+	lda	$30,0x4000-SIZEOF_PT_REGS($8)
 	/* ... and then we can start the kernel.  */
 	jsr	$26,start_kernel
 	call_pal PAL_halt
arch/alpha/kernel/systbls.S

@@ -442,6 +442,7 @@ sys_call_table:
 	.quad sys_clock_gettime			/* 420 */
 	.quad sys_clock_getres
 	.quad sys_clock_nanosleep
+	.quad sys_semtimedop
 
 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object
arch/alpha/kernel/traps.c

@@ -485,9 +485,9 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"	extwh %2,%3,%2\n"
 		"3:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %1,3b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %2,3b-2b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
@@ -505,9 +505,9 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"	extlh %2,%3,%2\n"
 		"3:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %1,3b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %2,3b-2b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
@@ -525,9 +525,9 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"	extqh %2,%3,%2\n"
 		"3:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %1,3b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %2,3b-2b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
@@ -554,13 +554,13 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %2,5b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %1,5b-2b(%0)\n"
-		"	.gprel32 3b\n"
+		"	.long 3b - .\n"
 		"	lda $31,5b-3b(%0)\n"
-		"	.gprel32 4b\n"
+		"	.long 4b - .\n"
 		"	lda $31,5b-4b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
@@ -584,13 +584,13 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %2,5b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %1,5b-2b(%0)\n"
-		"	.gprel32 3b\n"
+		"	.long 3b - .\n"
 		"	lda $31,5b-3b(%0)\n"
-		"	.gprel32 4b\n"
+		"	.long 4b - .\n"
 		"	lda $31,5b-4b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
@@ -614,13 +614,13 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		".section __ex_table,\"a\"\n\t"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %2,5b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %1,5b-2b(%0)\n"
-		"	.gprel32 3b\n"
+		"	.long 3b - .\n"
 		"	lda $31,5b-3b(%0)\n"
-		"	.gprel32 4b\n"
+		"	.long 4b - .\n"
 		"	lda $31,5b-4b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
@@ -845,9 +845,9 @@ do_entUnaUser(void * va, unsigned long opcode,
 		"	extwh %2,%3,%2\n"
 		"3:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %1,3b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %2,3b-2b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
@@ -865,9 +865,9 @@ do_entUnaUser(void * va, unsigned long opcode,
 		"	extlh %2,%3,%2\n"
 		"3:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %1,3b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %2,3b-2b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
@@ -885,9 +885,9 @@ do_entUnaUser(void * va, unsigned long opcode,
 		"	extqh %2,%3,%2\n"
 		"3:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %1,3b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %2,3b-2b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
@@ -905,9 +905,9 @@ do_entUnaUser(void * va, unsigned long opcode,
 		"	extlh %2,%3,%2\n"
 		"3:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %1,3b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %2,3b-2b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
@@ -925,9 +925,9 @@ do_entUnaUser(void * va, unsigned long opcode,
 		"	extqh %2,%3,%2\n"
 		"3:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %1,3b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %2,3b-2b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
@@ -954,13 +954,13 @@ do_entUnaUser(void * va, unsigned long opcode,
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %2,5b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %1,5b-2b(%0)\n"
-		"	.gprel32 3b\n"
+		"	.long 3b - .\n"
 		"	lda $31,5b-3b(%0)\n"
-		"	.gprel32 4b\n"
+		"	.long 4b - .\n"
 		"	lda $31,5b-4b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
@@ -988,13 +988,13 @@ do_entUnaUser(void * va, unsigned long opcode,
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		".section __ex_table,\"a\"\n"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %2,5b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %1,5b-2b(%0)\n"
-		"	.gprel32 3b\n"
+		"	.long 3b - .\n"
 		"	lda $31,5b-3b(%0)\n"
-		"	.gprel32 4b\n"
+		"	.long 4b - .\n"
 		"	lda $31,5b-4b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
@@ -1022,13 +1022,13 @@ do_entUnaUser(void * va, unsigned long opcode,
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		".section __ex_table,\"a\"\n\t"
-		"	.gprel32 1b\n"
+		"	.long 1b - .\n"
 		"	lda %2,5b-1b(%0)\n"
-		"	.gprel32 2b\n"
+		"	.long 2b - .\n"
 		"	lda %1,5b-2b(%0)\n"
-		"	.gprel32 3b\n"
+		"	.long 3b - .\n"
 		"	lda $31,5b-3b(%0)\n"
-		"	.gprel32 4b\n"
+		"	.long 4b - .\n"
 		"	lda $31,5b-4b(%0)\n"
 		".previous"
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
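All of the traps.c hunks above (and the two csum_partial_copy.c hunks below) make the same two-line substitution: each __ex_table entry switches from a GP-relative ".gprel32 1b" address to a self-relative ".long 1b - ." offset, so the table no longer depends on which GP value was in effect when it was assembled. A minimal decoding sketch, assuming an illustrative entry layout rather than the kernel's real definitions:

/* Illustrative only, not code from this commit. */
#include <stdint.h>

struct ex_entry_sketch {
	int32_t insn;	/* emitted by ".long 1b - .": label minus this field's address */
	int32_t fixup;	/* fixup word emitted by the following lda directive */
};

static inline uintptr_t ex_insn_addr(const struct ex_entry_sketch *e)
{
	/* The entry stores (label - &entry), so adding the entry's own
	   address back recovers the absolute faulting-instruction address. */
	return (uintptr_t)&e->insn + (intptr_t)e->insn;
}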
arch/alpha/lib/csum_partial_copy.c

@@ -46,7 +46,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
 	"1:	ldq_u %0,%2\n"				\
 	"2:\n"					\
 	".section __ex_table,\"a\"\n"		\
-	"	.gprel32 1b\n"			\
+	"	.long 1b - .\n"			\
 	"	lda %0,2b-1b(%1)\n"			\
 	".previous"				\
 		: "=r"(x), "=r"(__guu_err)	\
@@ -61,7 +61,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
 	"1:	stq_u %2,%1\n"				\
 	"2:\n"					\
 	".section __ex_table,\"a\"\n"		\
-	"	.gprel32 1b"			\
+	"	.long 1b - ."			\
 	"	lda $31,2b-1b(%0)\n"		\
 	".previous"				\
 		: "=r"(__puu_err)			\
arch/alpha/lib/memmove.S

@@ -11,6 +11,15 @@
 	.set noreorder
 	.text
 
+	.align 4
+	.globl bcopy
+	.ent bcopy
+bcopy:
+	mov	$16,$0
+	mov	$17,$16
+	mov	$0,$17
+	.end bcopy
+
 	.align 4
 	.globl memmove
 	.ent memmove
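The new bcopy entry point above swaps its first two arguments through $0 and then appears to fall straight through into memmove, since bcopy(src, dst, n) and memmove(dst, src, n) take their pointers in the opposite order. A rough C equivalent, as a sketch only (the helper name is mine, not the kernel's):

#include <string.h>

/* Sketch: what the bcopy stub amounts to in C (not code from this commit). */
void bcopy_sketch(const void *src, void *dst, size_t n)
{
	memmove(dst, src, n);	/* same copy, pointer arguments reordered */
}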
include/asm-alpha/ptrace.h

@@ -71,15 +71,8 @@ struct switch_stack {
 #define instruction_pointer(regs) ((regs)->pc)
 
 extern void show_regs(struct pt_regs *);
 
-/*
- * TODO: if kernel-only threads do not have a dummy pt_regs structure at the
- * top of the stack, this would cause kernel stack corruption.  Either check
- * first that we're not dealing with a kernel thread or change the kernel
- * stacks to allocate a dummy pt_regs structure.
- */
-#define alpha_task_regs(task) ((struct pt_regs *) \
-	((long) task->thread_info + PAGE_SIZE) - 1)
+#define alpha_task_regs(task) \
+  ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1)
 
 #define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0)
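The replacement alpha_task_regs() puts pt_regs at the top of a two-page kernel stack, which is why the TODO about kernel-only threads lacking a dummy pt_regs could be dropped. Assuming Alpha's usual 8 KB pages (PAGE_SHIFT of 13), 2*PAGE_SIZE is the same 0x4000 that head.S now offsets by SIZEOF_PT_REGS; a small consistency check, illustrative only:

/* Illustrative check, not part of the commit: assumes Alpha's 8 KB pages. */
#define SKETCH_PAGE_SIZE 8192UL		/* PAGE_SHIFT == 13 on Alpha */
_Static_assert(2 * SKETCH_PAGE_SIZE == 0x4000, "16 KB thread stack, pt_regs at the top");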
include/asm-alpha/string.h

@@ -13,6 +13,7 @@
 #define __HAVE_ARCH_MEMCPY
 extern void * memcpy(void *, const void *, size_t);
 #define __HAVE_ARCH_MEMMOVE
+#define __HAVE_ARCH_BCOPY
 extern void * memmove(void *, const void *, size_t);
 
 /* For backward compatibility with modules.  Unused otherwise. */
include/asm-alpha/uaccess.h

@@ -340,25 +340,31 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
 /*
  * Complex access routines
  */
 
+/* This little bit of silliness is to get the GP loaded for a function
+   that ordinarily wouldn't.  Otherwise we could have it done by the macro
+   directly, which can be optimized the linker.  */
+#ifdef MODULE
+#define __module_address(sym)		"r"(sym),
+#define __module_call(ra, arg, sym)	"jsr $" #ra ",(%" #arg ")," #sym
+#else
+#define __module_address(sym)
+#define __module_call(ra, arg, sym)	"bsr $" #ra "," #sym " !samegp"
+#endif
+
 extern void __copy_user(void);
 
 extern inline long
 __copy_tofrom_user_nocheck(void *to, const void *from, long len)
 {
-	/* This little bit of silliness is to get the GP loaded for
-	   a function that ordinarily wouldn't.  Otherwise we could
-	   have it done by the macro directly, which can be optimized
-	   the linker.  */
-	register void * pv __asm__("$27") = __copy_user;
-
 	register void * __cu_to __asm__("$6") = to;
 	register const void * __cu_from __asm__("$7") = from;
 	register long __cu_len __asm__("$0") = len;
 
 	__asm__ __volatile__(
-		"jsr $28,(%3),__copy_user"
-		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to), "=r"(pv)
-		: "0" (__cu_len), "1" (__cu_from), "2" (__cu_to), "3"(pv)
+		__module_call(28, 3, __copy_user)
+		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
+		: __module_address(__copy_user)
+		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
 		: "$1","$2","$3","$4","$5","$28","memory");
 
 	return __cu_len;
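For readability, this is how I read the new __module_call/__module_address pair expanding inside __copy_tofrom_user_nocheck; the sketch below is hand-expanded preprocessor output under that assumption, not text from the commit. In a module build the callee's address travels in an extra input operand and the call is an indirect jsr, so __copy_user can reload its own GP from $27; built into the kernel it collapses to a direct bsr with !samegp and no extra operand.

/* Hand-expanded sketch only (Alpha-specific, not generated from the headers). */
extern void __copy_user(void);

static inline long copy_nocheck_sketch(void *to, const void *from, long len)
{
	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

#ifdef MODULE
	/* __module_call(28, 3, __copy_user) plus __module_address(__copy_user): */
	__asm__ __volatile__(
		"jsr $28,(%3),__copy_user"		/* indirect call; callee reloads its GP */
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
		: "r" (__copy_user),			/* %3, supplied by __module_address */
		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
		: "$1","$2","$3","$4","$5","$28","memory");
#else
	/* Built in: direct branch, caller and callee share one GP. */
	__asm__ __volatile__(
		"bsr $28,__copy_user !samegp"
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
		: "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
		: "$1","$2","$3","$4","$5","$28","memory");
#endif
	return __cu_len;
}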
@@ -367,20 +373,8 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
 extern inline long
 __copy_tofrom_user(void *to, const void *from, long len, const void *validate)
 {
-	if (__access_ok((long)validate, len, get_fs())) {
-		register void * pv __asm__("$27") = __copy_user;
-		register void * __cu_to __asm__("$6") = to;
-		register const void * __cu_from __asm__("$7") = from;
-		register long __cu_len __asm__("$0") = len;
-		__asm__ __volatile__(
-			"jsr $28,(%3),__copy_user"
-			: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to), "=r"(pv)
-			: "0" (__cu_len), "1" (__cu_from), "2" (__cu_to), "3"(pv)
-			: "$1","$2","$3","$4","$5","$28","memory");
-		len = __cu_len;
-	}
+	if (__access_ok((long)validate, len, get_fs()))
+		len = __copy_tofrom_user_nocheck(to, from, len);
 	return len;
 }
@@ -404,18 +398,13 @@ extern void __do_clear_user(void);
 extern inline long
 __clear_user(void *to, long len)
 {
-	/* This little bit of silliness is to get the GP loaded for
-	   a function that ordinarily wouldn't.  Otherwise we could
-	   have it done by the macro directly, which can be optimized
-	   the linker.  */
-	register void * pv __asm__("$27") = __do_clear_user;
-
 	register void * __cl_to __asm__("$6") = to;
 	register long __cl_len __asm__("$0") = len;
 	__asm__ __volatile__(
-		"jsr $28,(%2),__do_clear_user"
-		: "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
-		: "0"(__cl_len), "1"(__cl_to), "2"(pv)
+		__module_call(28, 2, __do_clear_user)
+		: "=r"(__cl_len), "=r"(__cl_to)
+		: __module_address(__do_clear_user)
+		  "0"(__cl_len), "1"(__cl_to)
 		: "$1","$2","$3","$4","$5","$28","memory");
 	return __cl_len;
 }
@@ -423,20 +412,14 @@ __clear_user(void *to, long len)
 extern inline long
 clear_user(void *to, long len)
 {
-	if (__access_ok((long)to, len, get_fs())) {
-		register void * pv __asm__("$27") = __do_clear_user;
-		register void * __cl_to __asm__("$6") = to;
-		register long __cl_len __asm__("$0") = len;
-		__asm__ __volatile__(
-			"jsr $28,(%2),__do_clear_user"
-			: "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
-			: "0"(__cl_len), "1"(__cl_to), "2"(pv)
-			: "$1","$2","$3","$4","$5","$28","memory");
-		len = __cl_len;
-	}
+	if (__access_ok((long)to, len, get_fs()))
+		len = __clear_user(to, len);
 	return len;
 }
 
+#undef __module_address
+#undef __module_call
+
 /* Returns: -EFAULT if exception before terminator, N if the entire
    buffer filled, else strlen.  */
include/asm-alpha/unaligned.h

@@ -14,7 +14,7 @@
  * the get/put functions are indeed always optimized,
  * and that we use the correct sizes.
  */
-extern void bad_unaligned_access_length(void);
+extern void bad_unaligned_access_length(void) __attribute__((noreturn));
 
 /*
  * EGCS 1.1 knows about arbitrary unaligned loads.  Define some
include/asm-alpha/unistd.h

@@ -358,7 +358,8 @@
 #define __NR_clock_gettime		420
 #define __NR_clock_getres		421
 #define __NR_clock_nanosleep		422
-#define NR_SYSCALLS			423
+#define __NR_semtimedop			423
+#define NR_SYSCALLS			424
 
 #if defined(__GNUC__)
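Together, unistd.h and systbls.S wire up Alpha syscall 423 as semtimedop and bump NR_SYSCALLS to 424. A hypothetical userspace sketch of calling it by number while libc lacks a wrapper; the helper name and the hard-coded 423 are assumptions drawn only from this diff:

#include <stddef.h>
#include <unistd.h>
#include <sys/sem.h>
#include <time.h>

#define SKETCH_NR_semtimedop 423	/* __NR_semtimedop as defined above */

/* Hypothetical helper, not part of the commit. */
static long semtimedop_sketch(int semid, struct sembuf *sops, size_t nsops,
			      const struct timespec *timeout)
{
	return syscall(SKETCH_NR_semtimedop, semid, sops, nsops, timeout);
}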