Commit 78514c10, authored Apr 17, 2008 by Tony Luck (nexedi/linux)
Pull regset into release branch
Parents: 14d0647c, 4cd8dc83
Showing 4 changed files with 1544 additions and 383 deletions (+1544, -383)
arch/ia64/ia32/sys_ia32.c    +624  -25
arch/ia64/kernel/process.c     +0  -30
arch/ia64/kernel/ptrace.c    +895 -322
include/asm-ia64/elf.h        +25   -6
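For orientation before the per-file diffs: the user_regset interface this merge adopts describes each dumpable register block as a table entry with n elements of size bytes and get/set callbacks that copy between task state and either a kernel or a user buffer. The sketch below is illustrative only and is not code from this commit (fooregs_get, struct foo_regs and fill_foo_regs are made-up names), but it uses the same user_regset_copyout() helper that the new ia64 handlers rely on throughout:

/* Illustrative sketch only; names containing "foo" are hypothetical. */
static int fooregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	struct foo_regs regs;			/* hypothetical layout */

	fill_foo_regs(target, &regs);		/* hypothetical helper */

	/* Copies to whichever of kbuf/ubuf is non-NULL and advances
	 * pos/count, exactly as the handlers in this commit do. */
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &regs, 0, sizeof(regs));
}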
arch/ia64/ia32/sys_ia32.c
@@ -38,6 +38,7 @@
 #include <linux/eventpoll.h>
 #include <linux/personality.h>
 #include <linux/ptrace.h>
+#include <linux/regset.h>
 #include <linux/stat.h>
 #include <linux/ipc.h>
 #include <linux/capability.h>

@@ -2387,16 +2388,45 @@ get_free_idx (void)
 	return -ESRCH;
 }

+static void
+set_tls_desc (struct task_struct *p, int idx,
+	      const struct ia32_user_desc *info, int n)
+{
+	struct thread_struct *t = &p->thread;
+	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
+	int cpu;
+
+	/*
+	 * We must not get preempted while modifying the TLS.
+	 */
+	cpu = get_cpu();
+
+	while (n-- > 0) {
+		if (LDT_empty(info)) {
+			desc->a = 0;
+			desc->b = 0;
+		} else {
+			desc->a = LDT_entry_a(info);
+			desc->b = LDT_entry_b(info);
+		}
+		++info;
+		++desc;
+	}
+
+	if (t == &current->thread)
+		load_TLS(t, cpu);
+
+	put_cpu();
+}
+
 /*
  * Set a given TLS descriptor:
  */
 asmlinkage int
 sys32_set_thread_area (struct ia32_user_desc __user *u_info)
 {
-	struct thread_struct *t = &current->thread;
 	struct ia32_user_desc info;
-	struct desc_struct *desc;
-	int cpu, idx;
+	int idx;
 
 	if (copy_from_user(&info, u_info, sizeof(info)))
 		return -EFAULT;

@@ -2416,18 +2446,7 @@ sys32_set_thread_area (struct ia32_user_desc __user *u_info)
 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;
 
-	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-	cpu = smp_processor_id();
-
-	if (LDT_empty(&info)) {
-		desc->a = 0;
-		desc->b = 0;
-	} else {
-		desc->a = LDT_entry_a(&info);
-		desc->b = LDT_entry_b(&info);
-	}
-	load_TLS(t, cpu);
+	set_tls_desc(current, idx, &info, 1);
 
 	return 0;
 }

@@ -2451,6 +2470,20 @@ sys32_set_thread_area (struct ia32_user_desc __user *u_info)
 #define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
 #define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
 
+static void fill_user_desc(struct ia32_user_desc *info, int idx,
+			   const struct desc_struct *desc)
+{
+	info->entry_number = idx;
+	info->base_addr = GET_BASE(desc);
+	info->limit = GET_LIMIT(desc);
+	info->seg_32bit = GET_32BIT(desc);
+	info->contents = GET_CONTENTS(desc);
+	info->read_exec_only = !GET_WRITABLE(desc);
+	info->limit_in_pages = GET_LIMIT_PAGES(desc);
+	info->seg_not_present = !GET_PRESENT(desc);
+	info->useable = GET_USEABLE(desc);
+}
+
 asmlinkage int
 sys32_get_thread_area (struct ia32_user_desc __user *u_info)
 {

@@ -2464,22 +2497,588 @@ sys32_get_thread_area (struct ia32_user_desc __user *u_info)
 		return -EINVAL;
 
 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-	info.entry_number = idx;
-	info.base_addr = GET_BASE(desc);
-	info.limit = GET_LIMIT(desc);
-	info.seg_32bit = GET_32BIT(desc);
-	info.contents = GET_CONTENTS(desc);
-	info.read_exec_only = !GET_WRITABLE(desc);
-	info.limit_in_pages = GET_LIMIT_PAGES(desc);
-	info.seg_not_present = !GET_PRESENT(desc);
-	info.useable = GET_USEABLE(desc);
+	fill_user_desc(&info, idx, desc);
 
 	if (copy_to_user(u_info, &info, sizeof(info)))
 		return -EFAULT;
 	return 0;
 }
 
+struct regset_get {
+	void *kbuf;
+	void __user *ubuf;
+};
+
+struct regset_set {
+	const void *kbuf;
+	const void __user *ubuf;
+};
+
+struct regset_getset {
+	struct task_struct *target;
+	const struct user_regset *regset;
+	union {
+		struct regset_get get;
+		struct regset_set set;
+	} u;
+	unsigned int pos;
+	unsigned int count;
+	int ret;
+};
+
+static void
+getfpreg (struct task_struct *task, int regno, int *val)
+{
...
+}
+
+static void
+setfpreg (struct task_struct *task, int regno, int val)
+{
...
+}
+
+static void
+access_fpreg_ia32 (int regno, void *reg, struct pt_regs *pt,
+		struct switch_stack *sw, int tos, int write)
+{
...
+}
+
+static void
+do_fpregs_get (struct unw_frame_info *info, void *arg)
+{
...
+}
+
+static void
+do_fpregs_set (struct unw_frame_info *info, void *arg)
+{
...
+}
+
+#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member)))
+
+static void
+getfpxreg (struct task_struct *task, int start, int end, char *buf)
+{
...
+}
+
+static void
+setfpxreg (struct task_struct *task, int start, int end, char *buf)
+{
...
+}
+
+static void
+do_fpxregs_get (struct unw_frame_info *info, void *arg)
+{
...
+}
+
+static void
+do_fpxregs_set (struct unw_frame_info *info, void *arg)
+{
...
+}
+#undef OFFSET
+
+static int
+do_regset_call(void (*call)(struct unw_frame_info *, void *),
+	       struct task_struct *target,
+	       const struct user_regset *regset,
+	       unsigned int pos, unsigned int count,
+	       const void *kbuf, const void __user *ubuf)
+{
+	struct regset_getset info = { .target = target, .regset = regset,
+		.pos = pos, .count = count,
+		.u.set = { .kbuf = kbuf, .ubuf = ubuf },
+		.ret = 0 };
+
+	if (target == current)
+		unw_init_running(call, &info);
+	else {
+		struct unw_frame_info ufi;
+		memset(&ufi, 0, sizeof(ufi));
+		unw_init_from_blocked_task(&ufi, target);
+		(*call)(&ufi, &info);
+	}
+
+	return info.ret;
+}
+
+static int
+ia32_fpregs_get(struct task_struct *target, const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_get, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int
+ia32_fpregs_set(struct task_struct *target, const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_set, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int
+ia32_fpxregs_get(struct task_struct *target, const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpxregs_get, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int
+ia32_fpxregs_set(struct task_struct *target, const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpxregs_set, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int
+ia32_genregs_get(struct task_struct *target, const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	if (kbuf) {
+		u32 *kp = kbuf;
+		while (count > 0) {
+			*kp++ = getreg(target, pos);
+			pos += 4;
+			count -= 4;
+		}
+	} else {
+		u32 __user *up = ubuf;
+		while (count > 0) {
+			if (__put_user(getreg(target, pos), up++))
+				return -EFAULT;
+			pos += 4;
+			count -= 4;
+		}
+	}
+	return 0;
+}
+
+static int
+ia32_genregs_set(struct task_struct *target, const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	int ret = 0;
+
+	if (kbuf) {
+		const u32 *kp = kbuf;
+		while (!ret && count > 0) {
+			putreg(target, pos, *kp++);
+			pos += 4;
+			count -= 4;
+		}
+	} else {
+		const u32 __user *up = ubuf;
+		u32 val;
+		while (!ret && count > 0) {
+			ret = __get_user(val, up++);
+			if (!ret)
+				putreg(target, pos, val);
+			pos += 4;
+			count -= 4;
+		}
+	}
+	return ret;
+}
+
+static int
+ia32_tls_active(struct task_struct *target, const struct user_regset *regset)
+{
+	struct thread_struct *t = &target->thread;
+	int n = GDT_ENTRY_TLS_ENTRIES;
+	while (n > 0 && desc_empty(&t->tls_array[n - 1]))
+		--n;
+	return n;
+}
+
+static int
+ia32_tls_get(struct task_struct *target, const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	const struct desc_struct *tls;
+
+	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
+	    (pos % sizeof(struct ia32_user_desc)) != 0 ||
+	    (count % sizeof(struct ia32_user_desc)) != 0)
+		return -EINVAL;
+
+	pos /= sizeof(struct ia32_user_desc);
+	count /= sizeof(struct ia32_user_desc);
+
+	tls = &target->thread.tls_array[pos];
+
+	if (kbuf) {
+		struct ia32_user_desc *info = kbuf;
+		while (count-- > 0)
+			fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
+				       tls++);
+	} else {
+		struct ia32_user_desc __user *u_info = ubuf;
+		while (count-- > 0) {
+			struct ia32_user_desc info;
+			fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
+			if (__copy_to_user(u_info++, &info, sizeof(info)))
+				return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int
+ia32_tls_set(struct task_struct *target, const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	struct ia32_user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
+	const struct ia32_user_desc *info;
+
+	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
+	    (pos % sizeof(struct ia32_user_desc)) != 0 ||
+	    (count % sizeof(struct ia32_user_desc)) != 0)
+		return -EINVAL;
+
+	if (kbuf)
+		info = kbuf;
+	else if (__copy_from_user(infobuf, ubuf, count))
+		return -EFAULT;
+	else
+		info = infobuf;
+
+	set_tls_desc(target,
+		     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct ia32_user_desc)),
+		     info, count / sizeof(struct ia32_user_desc));
+
+	return 0;
+}
+
+/*
+ * This should match arch/i386/kernel/ptrace.c:native_regsets.
+ * XXX ioperm? vm86?
+ */
+static const struct user_regset ia32_regsets[] = {
+	{
+		.core_note_type = NT_PRSTATUS,
+		.n = sizeof(struct user_regs_struct32)/4,
+		.size = 4, .align = 4,
+		.get = ia32_genregs_get, .set = ia32_genregs_set
+	},
+	{
+		.core_note_type = NT_PRFPREG,
+		.n = sizeof(struct ia32_user_i387_struct) / 4,
+		.size = 4, .align = 4,
+		.get = ia32_fpregs_get, .set = ia32_fpregs_set
+	},
+	{
+		.core_note_type = NT_PRXFPREG,
+		.n = sizeof(struct ia32_user_fxsr_struct) / 4,
+		.size = 4, .align = 4,
+		.get = ia32_fpxregs_get, .set = ia32_fpxregs_set
+	},
+	{
+		.core_note_type = NT_386_TLS,
+		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
+		.size = sizeof(struct ia32_user_desc),
+		.align = sizeof(struct ia32_user_desc),
+		.active = ia32_tls_active,
+		.get = ia32_tls_get, .set = ia32_tls_set,
+	},
+};
+
+const struct user_regset_view user_ia32_view = {
+	.name = "i386", .e_machine = EM_386,
+	.regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets)
+};
 long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
 			__u32 len_low, __u32 len_high, int advice)
 {
...
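A note on how the ia32_regsets table above gets used: with CORE_DUMP_USE_REGSET defined (see include/asm-ia64/elf.h below), the generic ELF core-dump code walks each regset entry that reports itself active and, for the entries beyond the general-purpose set, emits one note whose payload is essentially n * size bytes filled in by the entry's get callback. The helper below is only an illustration of that sizing arithmetic, not code from this commit:

/* Illustrative only: rough payload size of the core-dump note for one
 * regset entry (e.g. NT_386_TLS: number of descriptors times their size). */
static inline size_t regset_note_payload(const struct user_regset *rs)
{
	return (size_t)rs->n * rs->size;
}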
arch/ia64/kernel/process.c
@@ -625,42 +625,12 @@ do_dump_fpu (struct unw_frame_info *info, void *arg)
 	do_dump_task_fpu(current, info, arg);
 }
 
-int
-dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
-{
-	struct unw_frame_info tcore_info;
-
-	if (current == task) {
-		unw_init_running(do_copy_regs, regs);
-	} else {
-		memset(&tcore_info, 0, sizeof(tcore_info));
-		unw_init_from_blocked_task(&tcore_info, task);
-		do_copy_task_regs(task, &tcore_info, regs);
-	}
-	return 1;
-}
-
 void
 ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
 {
 	unw_init_running(do_copy_regs, dst);
 }
 
-int
-dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
-{
-	struct unw_frame_info tcore_info;
-
-	if (current == task) {
-		unw_init_running(do_dump_fpu, dst);
-	} else {
-		memset(&tcore_info, 0, sizeof(tcore_info));
-		unw_init_from_blocked_task(&tcore_info, task);
-		do_dump_task_fpu(task, &tcore_info, dst);
-	}
-	return 1;
-}
-
 int
 dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
 {
...
arch/ia64/kernel/ptrace.c
@@ -3,6 +3,9 @@
  *
  * Copyright (C) 1999-2005 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2006 Intel Co
+ *  2006-08-12	- IA64 Native Utrace implementation support added by
+ *	  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *
  * Derived from the x86 and Alpha versions.
  */

@@ -17,6 +20,8 @@
 #include <linux/security.h>
 #include <linux/audit.h>
 #include <linux/signal.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>

@@ -740,25 +745,6 @@ ia64_sync_fph (struct task_struct *task)
 	psr->dfh = 1;
 }
 
-static int
-access_fr (struct unw_frame_info *info, int regnum, int hi,
-	   unsigned long *data, int write_access)
-{
-	struct ia64_fpreg fpval;
-	int ret;
-
-	ret = unw_get_fr(info, regnum, &fpval);
-	if (ret < 0)
-		return ret;
-
-	if (write_access) {
-		fpval.u.bits[hi] = *data;
-		ret = unw_set_fr(info, regnum, fpval);
-	} else
-		*data = fpval.u.bits[hi];
-	return ret;
-}
-
 /*
  * Change the machine-state of CHILD such that it will return via the normal
  * kernel exit-path, rather than the syscall-exit path.

@@ -860,309 +846,7 @@ access_nat_bits (struct task_struct *child, struct pt_regs *pt,
 static int
 access_uarea (struct task_struct *child, unsigned long addr,
-	      unsigned long *data, int write_access)
-{
-	unsigned long *ptr, regnum, urbs_end, cfm;
-	struct switch_stack *sw;
-	struct pt_regs *pt;
...
-	if (write_access)
-		*ptr = *data;
-	else
-		*data = *ptr;
-	return 0;
-}
+	      unsigned long *data, int write_access);
 
 static long
 ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
...

@@ -1626,3 +1310,892 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 	if (test_thread_flag(TIF_RESTORE_RSE))
 		ia64_sync_krbs();
 }
+
+/* Utrace implementation starts here */
+
+struct regset_get {
+	void *kbuf;
+	void __user *ubuf;
+};
+
+struct regset_set {
+	const void *kbuf;
+	const void __user *ubuf;
+};
+
+struct regset_getset {
+	struct task_struct *target;
+	const struct user_regset *regset;
+	union {
+		struct regset_get get;
+		struct regset_set set;
+	} u;
+	unsigned int pos;
+	unsigned int count;
+	int ret;
+};
+
+static int
+access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	struct pt_regs *pt;
+	unsigned long *ptr = NULL;
+	int ret;
+	char nat = 0;
+
+	pt = task_pt_regs(target);
+	switch (addr) {
+	case ELF_GR_OFFSET(1):
+		ptr = &pt->r1;
+		break;
+	case ELF_GR_OFFSET(2):
+	case ELF_GR_OFFSET(3):
+		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
+		break;
+	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
+		if (write_access) {
+			/* read NaT bit first: */
+			unsigned long dummy;
+			ret = unw_get_gr(info, addr/8, &dummy, &nat);
+			if (ret < 0)
+				return ret;
+		}
+		return unw_access_gr(info, addr/8, data, &nat, write_access);
+	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
+		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
+		break;
+	case ELF_GR_OFFSET(12):
+	case ELF_GR_OFFSET(13):
+		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
+		break;
+	case ELF_GR_OFFSET(14):
+		ptr = &pt->r14;
+		break;
+	case ELF_GR_OFFSET(15):
+		ptr = &pt->r15;
+	}
+	if (write_access)
+		*ptr = *data;
+	else
+		*data = *ptr;
+	return 0;
+}
+
+static int
+access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	struct pt_regs *pt;
+	unsigned long *ptr = NULL;
+
+	pt = task_pt_regs(target);
+	switch (addr) {
+	case ELF_BR_OFFSET(0):
+		ptr = &pt->b0;
+		break;
+	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
+		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
+				     data, write_access);
+	case ELF_BR_OFFSET(6):
+		ptr = &pt->b6;
+		break;
+	case ELF_BR_OFFSET(7):
+		ptr = &pt->b7;
+	}
+	if (write_access)
+		*ptr = *data;
+	else
+		*data = *ptr;
+	return 0;
+}
+
+static int
+access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	struct pt_regs *pt;
+	unsigned long cfm, urbs_end;
+	unsigned long *ptr = NULL;
+
+	pt = task_pt_regs(target);
+	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
+		switch (addr) {
+		case ELF_AR_RSC_OFFSET:
+			/* force PL3 */
+			if (write_access)
+				pt->ar_rsc = *data | (3 << 2);
+			else
+				*data = pt->ar_rsc;
+			return 0;
+		case ELF_AR_BSP_OFFSET:
+			/*
+			 * By convention, we use PT_AR_BSP to refer to
+			 * the end of the user-level backing store.
+			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
+			 * to get the real value of ar.bsp at the time
+			 * the kernel was entered.
+			 *
+			 * Furthermore, when changing the contents of
+			 * PT_AR_BSP (or PT_CFM) while the task is
+			 * blocked in a system call, convert the state
+			 * so that the non-system-call exit
+			 * path is used.  This ensures that the proper
+			 * state will be picked up when resuming
+			 * execution.  However, it *also* means that
+			 * once we write PT_AR_BSP/PT_CFM, it won't be
+			 * possible to modify the syscall arguments of
+			 * the pending system call any longer.  This
+			 * shouldn't be an issue because modifying
+			 * PT_AR_BSP/PT_CFM generally implies that
+			 * we're either abandoning the pending system
+			 * call or that we defer it's re-execution
+			 * (e.g., due to GDB doing an inferior
+			 * function call).
+			 */
+			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
+			if (write_access) {
+				if (*data != urbs_end) {
+					if (in_syscall(pt))
+						convert_to_non_syscall(target,
+								       pt, cfm);
+					/*
+					 * Simulate user-level write
+					 * of ar.bsp:
+					 */
+					pt->loadrs = 0;
+					pt->ar_bspstore = *data;
+				}
+			} else
+				*data = urbs_end;
+			return 0;
+		case ELF_AR_BSPSTORE_OFFSET:
+			ptr = &pt->ar_bspstore;
+			break;
+		case ELF_AR_RNAT_OFFSET:
+			ptr = &pt->ar_rnat;
+			break;
+		case ELF_AR_CCV_OFFSET:
+			ptr = &pt->ar_ccv;
+			break;
+		case ELF_AR_UNAT_OFFSET:
+			ptr = &pt->ar_unat;
+			break;
+		case ELF_AR_FPSR_OFFSET:
+			ptr = &pt->ar_fpsr;
+			break;
+		case ELF_AR_PFS_OFFSET:
+			ptr = &pt->ar_pfs;
+			break;
+		case ELF_AR_LC_OFFSET:
+			return unw_access_ar(info, UNW_AR_LC, data,
+					     write_access);
+		case ELF_AR_EC_OFFSET:
+			return unw_access_ar(info, UNW_AR_EC, data,
+					     write_access);
+		case ELF_AR_CSD_OFFSET:
+			ptr = &pt->ar_csd;
+			break;
+		case ELF_AR_SSD_OFFSET:
+			ptr = &pt->ar_ssd;
+		}
+	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
+		switch (addr) {
+		case ELF_CR_IIP_OFFSET:
+			ptr = &pt->cr_iip;
+			break;
+		case ELF_CFM_OFFSET:
+			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
+			if (write_access) {
+				if (((cfm ^ *data) & PFM_MASK) != 0) {
+					if (in_syscall(pt))
+						convert_to_non_syscall(target,
+								       pt, cfm);
+					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
+						      | (*data & PFM_MASK));
+				}
+			} else
+				*data = cfm;
+			return 0;
+		case ELF_CR_IPSR_OFFSET:
+			if (write_access) {
+				unsigned long tmp = *data;
+				/* psr.ri==3 is a reserved value: SDM 2:25 */
+				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
+					tmp &= ~IA64_PSR_RI;
+				pt->cr_ipsr = ((tmp & IPSR_MASK)
+					       | (pt->cr_ipsr & ~IPSR_MASK));
+			} else
+				*data = (pt->cr_ipsr & IPSR_MASK);
+			return 0;
+		}
+	} else if (addr == ELF_NAT_OFFSET)
+		return access_nat_bits(target, pt, info, data, write_access);
+	else if (addr == ELF_PR_OFFSET)
+		ptr = &pt->pr;
+	else
+		return -1;
+
+	if (write_access)
+		*ptr = *data;
+	else
+		*data = *ptr;
+
+	return 0;
+}
+
+static int
+access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
+		return access_elf_gpreg(target, info, addr, data, write_access);
+	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
+		return access_elf_breg(target, info, addr, data, write_access);
+	else
+		return access_elf_areg(target, info, addr, data, write_access);
+}
+
+void do_gpregs_get(struct unw_frame_info *info, void *arg)
+{
+	struct pt_regs *pt;
+	struct regset_getset *dst = arg;
+	elf_greg_t tmp[16];
+	unsigned int i, index, min_copy;
+
+	if (unw_unwind_to_user(info) < 0)
+		return;
+
+	/*
+	 * coredump format:
+	 *      r0-r31
+	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
+	 *      predicate registers (p0-p63)
+	 *      b0-b7
+	 *      ip cfm user-mask
+	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
+	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
+	 */
+
+	/* Skip r0 */
+	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
+		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
+						    &dst->u.get.kbuf,
+						    &dst->u.get.ubuf,
+						    0, ELF_GR_OFFSET(1));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* gr1 - gr15 */
+	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
+		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
+		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
+			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+					   &tmp[index], 0) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* r16-r31 */
+	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
+		pt = task_pt_regs(dst->target);
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
+				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* nat, pr, b0 - b7 */
+	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
+		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
+		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
+			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+					   &tmp[index], 0) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+
+	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
+	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
+	 */
+	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
+		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
+		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
+			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+					   &tmp[index], 0) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
+	}
+}
+
+void do_gpregs_set(struct unw_frame_info *info, void *arg)
+{
...
+}
+
+#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))
+
+void do_fpregs_get(struct unw_frame_info *info, void *arg)
+{
...
+}
+
+void do_fpregs_set(struct unw_frame_info *info, void *arg)
+{
...
+}
+
+static int
+do_regset_call(void (*call)(struct unw_frame_info *, void *),
+	       struct task_struct *target,
+	       const struct user_regset *regset,
+	       unsigned int pos, unsigned int count,
+	       const void *kbuf, const void __user *ubuf)
+{
+	struct regset_getset info = { .target = target, .regset = regset,
+				 .pos = pos, .count = count,
+				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
+				 .ret = 0 };
+
+	if (target == current)
+		unw_init_running(call, &info);
+	else {
+		struct unw_frame_info ufi;
+		memset(&ufi, 0, sizeof(ufi));
+		unw_init_from_blocked_task(&ufi, target);
+		(*call)(&ufi, &info);
+	}
+
+	return info.ret;
+}
+
+static int
+gpregs_get(struct task_struct *target, const struct user_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_gpregs_get, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int gpregs_set(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_gpregs_set, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
+{
+	do_sync_rbs(info, ia64_sync_user_rbs);
+}
+
+/*
+ * This is called to write back the register backing store.
+ * ptrace does this before it stops, so that a tracer reading the user
+ * memory after the thread stops will get the current register data.
+ */
+static int
+gpregs_writeback(struct task_struct *target,
+		 const struct user_regset *regset,
+		 int now)
+{
+	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
+		return 0;
+	tsk_set_notify_resume(target);
+	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
+		NULL, NULL);
+}
+
+static int
+fpregs_active(struct task_struct *target, const struct user_regset *regset)
+{
+	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
+}
+
+static int fpregs_get(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_get, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int fpregs_set(struct task_struct *target,
+		const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_set, target, regset, pos, count,
+		kbuf, ubuf);
+}
+
+static int
+access_uarea(struct task_struct *child, unsigned long addr,
+	     unsigned long *data, int write_access)
+{
+	unsigned int pos = -1; /* an invalid value */
+	int ret;
+	unsigned long *ptr, regnum;
+
+	if ((addr & 0x7) != 0) {
+		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
+		return -1;
+	}
+	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
+		(addr >= PT_R7 + 8 && addr < PT_B1) ||
+		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
+		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
+		dprintk("ptrace: rejecting access to register "
+					"address 0x%lx\n", addr);
+		return -1;
+	}
+
+	switch (addr) {
+	case PT_F32 ... (PT_F127 + 15):
+		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
+		break;
+	case PT_F2 ... (PT_F5 + 15):
+		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
+		break;
+	case PT_F10 ... (PT_F31 + 15):
+		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
+		break;
+	case PT_F6 ... (PT_F9 + 15):
+		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
+		break;
+	}
+
+	if (pos != -1) {
+		if (write_access)
+			ret = fpregs_set(child, NULL, pos,
+				sizeof(unsigned long), data, NULL);
+		else
+			ret = fpregs_get(child, NULL, pos,
+				sizeof(unsigned long), data, NULL);
+		if (ret != 0)
+			return -1;
+		return 0;
+	}
+
+	switch (addr) {
+	case PT_NAT_BITS: pos = ELF_NAT_OFFSET; break;
+	case PT_R4 ... PT_R7: pos = addr - PT_R4 + ELF_GR_OFFSET(4); break;
+	case PT_B1 ... PT_B5: pos = addr - PT_B1 + ELF_BR_OFFSET(1); break;
+	case PT_AR_EC: pos = ELF_AR_EC_OFFSET; break;
+	case PT_AR_LC: pos = ELF_AR_LC_OFFSET; break;
+	case PT_CR_IPSR: pos = ELF_CR_IPSR_OFFSET; break;
+	case PT_CR_IIP: pos = ELF_CR_IIP_OFFSET; break;
+	case PT_CFM: pos = ELF_CFM_OFFSET; break;
+	case PT_AR_UNAT: pos = ELF_AR_UNAT_OFFSET; break;
+	case PT_AR_PFS: pos = ELF_AR_PFS_OFFSET; break;
+	case PT_AR_RSC: pos = ELF_AR_RSC_OFFSET; break;
+	case PT_AR_RNAT: pos = ELF_AR_RNAT_OFFSET; break;
+	case PT_AR_BSPSTORE: pos = ELF_AR_BSPSTORE_OFFSET; break;
+	case PT_PR: pos = ELF_PR_OFFSET; break;
+	case PT_B6: pos = ELF_BR_OFFSET(6); break;
+	case PT_AR_BSP: pos = ELF_AR_BSP_OFFSET; break;
+	case PT_R1 ... PT_R3: pos = addr - PT_R1 + ELF_GR_OFFSET(1); break;
+	case PT_R12 ... PT_R15: pos = addr - PT_R12 + ELF_GR_OFFSET(12); break;
+	case PT_R8 ... PT_R11: pos = addr - PT_R8 + ELF_GR_OFFSET(8); break;
+	case PT_R16 ... PT_R31: pos = addr - PT_R16 + ELF_GR_OFFSET(16); break;
+	case PT_AR_CCV: pos = ELF_AR_CCV_OFFSET; break;
+	case PT_AR_FPSR: pos = ELF_AR_FPSR_OFFSET; break;
+	case PT_B0: pos = ELF_BR_OFFSET(0); break;
+	case PT_B7: pos = ELF_BR_OFFSET(7); break;
+	case PT_AR_CSD: pos = ELF_AR_CSD_OFFSET; break;
+	case PT_AR_SSD: pos = ELF_AR_SSD_OFFSET; break;
+	}
+
+	if (pos != -1) {
+		if (write_access)
+			ret = gpregs_set(child, NULL, pos,
+				sizeof(unsigned long), data, NULL);
+		else
+			ret = gpregs_get(child, NULL, pos,
+				sizeof(unsigned long), data, NULL);
+		if (ret != 0)
+			return -1;
+		return 0;
+	}
+
+	/* access debug registers */
+	if (addr >= PT_IBR) {
+		regnum = (addr - PT_IBR) >> 3;
+		ptr = &child->thread.ibr[0];
+	} else {
+		regnum = (addr - PT_DBR) >> 3;
+		ptr = &child->thread.dbr[0];
+	}
+
+	if (regnum >= 8) {
+		dprintk("ptrace: rejecting access to register "
+				"address 0x%lx\n", addr);
+		return -1;
+	}
+#ifdef CONFIG_PERFMON
+	/*
+	 * Check if debug registers are used by perfmon. This
+	 * test must be done once we know that we can do the
+	 * operation, i.e. the arguments are all valid, but
+	 * before we start modifying the state.
+	 *
+	 * Perfmon needs to keep a count of how many processes
+	 * are trying to modify the debug registers for system
+	 * wide monitoring sessions.
+	 *
+	 * We also include read access here, because they may
+	 * cause the PMU-installed debug register state
+	 * (dbr[], ibr[]) to be reset.  The two arrays are also
+	 * used by perfmon, but we do not use
+	 * IA64_THREAD_DBG_VALID. The registers are restored
+	 * by the PMU context switch code.
+	 */
+	if (pfm_use_debug_registers(child))
+		return -1;
+#endif
+
+	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
+		child->thread.flags |= IA64_THREAD_DBG_VALID;
+		memset(child->thread.dbr, 0, sizeof(child->thread.dbr));
+		memset(child->thread.ibr, 0, sizeof(child->thread.ibr));
+	}
+
+	ptr += regnum;
+
+	if ((regnum & 1) && write_access) {
+		/* don't let the user set kernel-level breakpoints: */
+		*ptr = *data & ~(7UL << 56);
+		return 0;
+	}
+	if (write_access)
+		*ptr = *data;
+	else
+		*data = *ptr;
+	return 0;
+}
+
+static const struct user_regset native_regsets[] = {
+	{
+		.core_note_type = NT_PRSTATUS,
+		.n = ELF_NGREG,
+		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
+		.get = gpregs_get, .set = gpregs_set,
+		.writeback = gpregs_writeback
+	},
+	{
+		.core_note_type = NT_PRFPREG,
+		.n = ELF_NFPREG,
+		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
+		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
+	},
+};
+
+static const struct user_regset_view user_ia64_view = {
+	.name = "ia64",
+	.e_machine = EM_IA_64,
+	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
+{
+#ifdef CONFIG_IA32_SUPPORT
+	extern const struct user_regset_view user_ia32_view;
+	if (IS_IA32_PROCESS(task_pt_regs(tsk)))
+		return &user_ia32_view;
+#endif
+	return &user_ia64_view;
+}
include/asm-ia64/elf.h
@@ -26,6 +26,7 @@
 #define ELF_ARCH	EM_IA_64
 
 #define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
 
 /* Least-significant four bits of ELF header's e_flags are OS-specific.  The bits are
    interpreted as follows by Linux: */

@@ -154,6 +155,30 @@ extern void ia64_init_addr_space (void);
 #define ELF_NGREG	128	/* we really need just 72 but let's leave some headroom... */
 #define ELF_NFPREG	128	/* f0 and f1 could be omitted, but so what... */
 
+/* elf_gregset_t register offsets */
+#define ELF_GR_0_OFFSET     0
+#define ELF_NAT_OFFSET     (32 * sizeof(elf_greg_t))
+#define ELF_PR_OFFSET      (33 * sizeof(elf_greg_t))
+#define ELF_BR_0_OFFSET    (34 * sizeof(elf_greg_t))
+#define ELF_CR_IIP_OFFSET  (42 * sizeof(elf_greg_t))
+#define ELF_CFM_OFFSET     (43 * sizeof(elf_greg_t))
+#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
+#define ELF_GR_OFFSET(i)   (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_BR_OFFSET(i)   (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_AR_RSC_OFFSET  (45 * sizeof(elf_greg_t))
+#define ELF_AR_BSP_OFFSET  (46 * sizeof(elf_greg_t))
+#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
+#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
+#define ELF_AR_CCV_OFFSET  (49 * sizeof(elf_greg_t))
+#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
+#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
+#define ELF_AR_PFS_OFFSET  (52 * sizeof(elf_greg_t))
+#define ELF_AR_LC_OFFSET   (53 * sizeof(elf_greg_t))
+#define ELF_AR_EC_OFFSET   (54 * sizeof(elf_greg_t))
+#define ELF_AR_CSD_OFFSET  (55 * sizeof(elf_greg_t))
+#define ELF_AR_SSD_OFFSET  (56 * sizeof(elf_greg_t))
+#define ELF_AR_END_OFFSET  (57 * sizeof(elf_greg_t))
 typedef unsigned long elf_fpxregset_t;
 
 typedef unsigned long elf_greg_t;

@@ -183,12 +208,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
 struct task_struct;
 
-extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
-extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
 #define GATE_EHDR	((const struct elfhdr *) GATE_ADDR)
 
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
...
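The ELF_*_OFFSET macros added in include/asm-ia64/elf.h lay the dump format out as 32 general registers, then the NaT bits, predicates, eight branch registers, and finally ip/cfm/psr plus the application registers, all as consecutive elf_greg_t slots. The compile-time checks below are only an illustration of that arithmetic and are not part of the patch:

/* Illustrative only; not in the commit. */
static inline void elf_gregset_layout_check(void)
{
	/* b0-b7 occupy slots 34-41, so slot 42 is cr.iip. */
	BUILD_BUG_ON(ELF_BR_OFFSET(8) != ELF_CR_IIP_OFFSET);
	/* The whole layout fits in the ELF_NGREG slots reserved above. */
	BUILD_BUG_ON(ELF_AR_END_OFFSET > ELF_NGREG * sizeof(elf_greg_t));
}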