Commit 4cc9bed0 authored Jan 05, 2011 by Martin Schwidefsky, committed by Martin Schwidefsky on Jan 05, 2011
[S390] cleanup ftrace backend functions
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 5e9a2692
Showing 4 changed files with 135 additions and 171 deletions (+135 -171)
arch/s390/include/asm/ftrace.h   +4   -7
arch/s390/kernel/ftrace.c        +117 -121
arch/s390/kernel/mcount.S        +7   -23
arch/s390/kernel/mcount64.S      +7   -20
arch/s390/include/asm/ftrace.h
@@ -4,20 +4,17 @@
 #ifndef __ASSEMBLY__

 extern void _mcount(void);
-extern unsigned long ftrace_dyn_func;

 struct dyn_arch_ftrace { };

 #define MCOUNT_ADDR ((long)_mcount)

 #ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 18
-#define MCOUNT_INSN_SIZE  24
-#define MCOUNT_OFFSET     14
+#define MCOUNT_INSN_SIZE  12
+#define MCOUNT_OFFSET     8
 #else
-#define MCOUNT_OFFSET_RET 26
-#define MCOUNT_INSN_SIZE  30
-#define MCOUNT_OFFSET     8
+#define MCOUNT_INSN_SIZE  20
+#define MCOUNT_OFFSET     4
 #endif

 static inline unsigned long ftrace_call_adjust(unsigned long addr)
arch/s390/kernel/ftrace.c
@@ -4,7 +4,7 @@
  * Copyright IBM Corp. 2009
  *
  *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- *
+ *              Martin Schwidefsky <schwidefsky@de.ibm.com>
  */

 #include <linux/hardirq.h>
@@ -12,176 +12,144 @@
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/kprobes.h>
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>

+#ifdef CONFIG_64BIT
+#define MCOUNT_OFFSET_RET 12
+#else
+#define MCOUNT_OFFSET_RET 22
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE

 void ftrace_disable_code(void);
-void ftrace_disable_return(void);
-void ftrace_call_code(void);
-void ftrace_nop_code(void);
-
-#define FTRACE_INSN_SIZE 4
+void ftrace_enable_insn(void);

 #ifdef CONFIG_64BIT
+/*
+ * The 64-bit mcount code looks like this:
+ *      stg     %r14,8(%r15)            # offset 0
+ * >    larl    %r1,<&counter>          # offset 6
+ * >    brasl   %r14,_mcount            # offset 12
+ *      lg      %r14,8(%r15)            # offset 18
+ * Total length is 24 bytes. The middle two instructions of the mcount
+ * block get overwritten by ftrace_make_nop / ftrace_make_call.
+ * The 64-bit enabled ftrace code block looks like this:
+ *      stg     %r14,8(%r15)            # offset 0
+ * >    lg      %r1,__LC_FTRACE_FUNC    # offset 6
+ * >    lgr     %r0,%r0                 # offset 12
+ * >    basr    %r14,%r1                # offset 16
+ *      lg      %r14,8(%15)             # offset 18
+ * The return points of the mcount/ftrace function have the same offset 18.
+ * The 64-bit disable ftrace code block looks like this:
+ *      stg     %r14,8(%r15)            # offset 0
+ * >    jg      .+18                    # offset 6
+ * >    lgr     %r0,%r0                 # offset 12
+ * >    basr    %r14,%r1                # offset 16
+ *      lg      %r14,8(%15)             # offset 18
+ * The jg instruction branches to offset 24 to skip as many instructions
+ * as possible.
+ */
 asm(
        "       .align  4\n"
        "ftrace_disable_code:\n"
-       "       j       0f\n"
-       "       .word   0x0024\n"
-       "       lg      %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
-       "       basr    %r14,%r1\n"
-       "ftrace_disable_return:\n"
-       "       lg      %r14,8(15)\n"
-       "       lgr     %r0,%r0\n"
-       "0:\n");
-
-asm(
-       "       .align  4\n"
-       "ftrace_nop_code:\n"
-       "       j       .+"__stringify(MCOUNT_INSN_SIZE)"\n");
-
-asm(
-       "       .align  4\n"
-       "ftrace_call_code:\n"
-       "       stg     %r14,8(%r15)\n");
+       "       jg      0f\n"
+       "       lgr     %r0,%r0\n"
+       "       basr    %r14,%r1\n"
+       "0:\n"
+       "       .align  4\n"
+       "ftrace_enable_insn:\n"
+       "       lg      %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
+
+#define FTRACE_INSN_SIZE       6

 #else /* CONFIG_64BIT */
+/*
+ * The 31-bit mcount code looks like this:
+ *      st      %r14,4(%r15)            # offset 0
+ * >    bras    %r1,0f                  # offset 4
+ * >    .long   _mcount                 # offset 8
+ * >    .long   <&counter>              # offset 12
+ * > 0: l       %r14,0(%r1)             # offset 16
+ * >    l       %r1,4(%r1)              # offset 20
+ *      basr    %r14,%r14               # offset 24
+ *      l       %r14,4(%r15)            # offset 26
+ * Total length is 30 bytes. The twenty bytes starting from offset 4
+ * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
+ * The 31-bit enabled ftrace code block looks like this:
+ *      st      %r14,4(%r15)            # offset 0
+ * >    l       %r14,__LC_FTRACE_FUNC   # offset 4
+ * >    j       0f                      # offset 8
+ * >    .fill   12,1,0x07               # offset 12
+ *   0: basr    %r14,%r14               # offset 24
+ *      l       %r14,4(%r14)            # offset 26
+ * The return points of the mcount/ftrace function have the same offset 26.
+ * The 31-bit disabled ftrace code block looks like this:
+ *      st      %r14,4(%r15)            # offset 0
+ * >    j       .+26                    # offset 4
+ * >    j       0f                      # offset 8
+ * >    .fill   12,1,0x07               # offset 12
+ *   0: basr    %r14,%r14               # offset 24
+ *      l       %r14,4(%r14)            # offset 26
+ * The j instruction branches to offset 30 to skip as many instructions
+ * as possible.
+ */
 asm(
        "       .align  4\n"
        "ftrace_disable_code:\n"
-       "       j       0f\n"
-       "       l       %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
-       "       basr    %r14,%r1\n"
-       "ftrace_disable_return:\n"
-       "       l       %r14,4(%r15)\n"
-       "       j       0f\n"
-       "       bcr     0,%r7\n"
-       "       bcr     0,%r7\n"
-       "       bcr     0,%r7\n"
-       "       bcr     0,%r7\n"
-       "       bcr     0,%r7\n"
-       "       bcr     0,%r7\n"
-       "0:\n");
-
-asm(
-       "       .align  4\n"
-       "ftrace_nop_code:\n"
-       "       j       .+"__stringify(MCOUNT_INSN_SIZE)"\n");
-
-asm(
-       "       .align  4\n"
-       "ftrace_call_code:\n"
-       "       st      %r14,4(%r15)\n");
+       "       j       1f\n"
+       "       j       0f\n"
+       "       .fill   12,1,0x07\n"
+       "0:     basr    %r14,%r14\n"
+       "       l       %r14,4(%r15)\n"
+       "1:\n"
+       "       .align  4\n"
+       "ftrace_enable_insn:\n"
+       "       l       %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
+
+#define FTRACE_INSN_SIZE       4

 #endif /* CONFIG_64BIT */
-static int ftrace_modify_code(unsigned long ip,
-                             void *old_code, int old_size,
-                             void *new_code, int new_size)
-{
-       unsigned char replaced[MCOUNT_INSN_SIZE];
-
-       /*
-        * Note: Due to modules code can disappear and change.
-        * We need to protect against faulting as well as code
-        * changing. We do this by using the probe_kernel_*
-        * functions.
-        * This however is just a simple sanity check.
-        */
-       if (probe_kernel_read(replaced, (void *)ip, old_size))
-               return -EFAULT;
-       if (memcmp(replaced, old_code, old_size) != 0)
-               return -EINVAL;
-       if (probe_kernel_write((void *)ip, new_code, new_size))
-               return -EPERM;
-       return 0;
-}
-
-static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
-                                  unsigned long addr)
-{
-       return ftrace_modify_code(rec->ip,
-                                 ftrace_call_code, FTRACE_INSN_SIZE,
-                                 ftrace_disable_code, MCOUNT_INSN_SIZE);
-}
-
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
 {
-       if (addr == MCOUNT_ADDR)
-               return ftrace_make_initial_nop(mod, rec, addr);
-       return ftrace_modify_code(rec->ip,
-                                 ftrace_call_code, FTRACE_INSN_SIZE,
-                                 ftrace_nop_code, FTRACE_INSN_SIZE);
+       if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
+                              MCOUNT_INSN_SIZE))
+               return -EPERM;
+       return 0;
 }
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-       return ftrace_modify_code(rec->ip,
-                                 ftrace_nop_code, FTRACE_INSN_SIZE,
-                                 ftrace_call_code, FTRACE_INSN_SIZE);
+       if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
+                              FTRACE_INSN_SIZE))
+               return -EPERM;
+       return 0;
 }
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-       ftrace_dyn_func = (unsigned long) func;
        return 0;
 }
 int __init ftrace_dyn_arch_init(void *data)
 {
        *(unsigned long *) data = 0;
        return 0;
 }

 #endif /* CONFIG_DYNAMIC_FTRACE */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-/*
- * Patch the kernel code at ftrace_graph_caller location:
- * The instruction there is branch relative on condition. The condition mask
- * is either all ones (always branch aka disable ftrace_graph_caller) or all
- * zeroes (nop aka enable ftrace_graph_caller).
- * Instruction format for brc is a7m4xxxx where m is the condition mask.
- */
-int ftrace_enable_ftrace_graph_caller(void)
-{
-       unsigned short opcode = 0xa704;
-
-       return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
-       unsigned short opcode = 0xa7f4;
-
-       return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
-}
-
-static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
-{
-       return addr - (ftrace_disable_return - ftrace_disable_code);
-}
-
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
-{
-       return addr - MCOUNT_OFFSET_RET;
-}
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
 /*
  * Hook the return address and push it in the stack of return addresses
  * in current thread info.
  */
-unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
+unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
+                                             unsigned long ip)
 {
        struct ftrace_graph_ent trace;

@@ -189,14 +157,42 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
                goto out;
        if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
-       trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
+       trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
        /* Only trace if the calling function expects to. */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                goto out;
        }
        parent = (unsigned long) return_to_handler;
 out:
        return parent;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Patch the kernel code at ftrace_graph_caller location. The instruction
+ * there is branch relative and save to prepare_ftrace_return. To disable
+ * the call to prepare_ftrace_return we patch the bras offset to point
+ * directly after the instructions. To enable the call we calculate
+ * the original offset to prepare_ftrace_return and put it back.
+ */
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       unsigned short offset;
+
+       offset = ((void *) prepare_ftrace_return -
+                 (void *) ftrace_graph_caller) / 2;
+       return probe_kernel_write(ftrace_graph_caller + 2, &offset,
+                                 sizeof(offset));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       static unsigned short offset = 0x0002;
+
+       return probe_kernel_write(ftrace_graph_caller + 2, &offset,
+                                 sizeof(offset));
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
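The block removed above enabled and disabled ftrace_graph_caller by rewriting the condition mask of the brc instruction at that location: as the deleted comment notes, brc encodes as a7m4xxxx, so 0xa7f4 (mask 0xf, branch always) jumps over the graph caller and 0xa704 (mask 0x0, branch never) acts as a nop and falls through. A minimal user-space sketch of that encoding, for illustration only and not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: build the first halfword of an s390 brc instruction
 * (a7 m 4 xxxx) for a given 4-bit condition mask. The 16-bit branch
 * offset that follows is left untouched by the kernel patching. */
static uint16_t brc_first_halfword(unsigned int mask)
{
        return (uint16_t)(0xa704 | ((mask & 0xf) << 4));
}

int main(void)
{
        printf("disable graph caller: 0x%04x\n", brc_first_halfword(0xf)); /* 0xa7f4, "j" */
        printf("enable graph caller:  0x%04x\n", brc_first_halfword(0x0)); /* 0xa704, nop */
        return 0;
}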
arch/s390/kernel/mcount.S
@@ -18,22 +18,12 @@ _mcount:
 #ifdef CONFIG_DYNAMIC_FTRACE
        br      %r14

-       .data
-       .globl  ftrace_dyn_func
-ftrace_dyn_func:
-       .long   ftrace_stub
-       .previous
-
        .globl ftrace_caller
 ftrace_caller:
 #endif
        stm     %r2,%r5,16(%r15)
        bras    %r1,2f
-#ifdef CONFIG_DYNAMIC_FTRACE
-0:     .long   ftrace_dyn_func
-#else
 0:     .long   ftrace_trace_function
-#endif
 1:     .long   function_trace_stop
 2:     l       %r2,1b-0b(%r1)
        icm     %r2,0xf,0(%r2)
@@ -49,21 +39,15 @@ ftrace_caller:
        l       %r14,0(%r14)
        basr    %r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
+       l       %r2,100(%r15)
+       l       %r3,152(%r15)
        .globl  ftrace_graph_caller
 ftrace_graph_caller:
-       # This unconditional branch gets runtime patched. Change only if
-       # you know what you are doing. See ftrace_enable_graph_caller().
-       j       1f
-#endif
-       bras    %r1,0f
-       .long   prepare_ftrace_return
-0:     l       %r2,152(%r15)
-       l       %r4,0(%r1)
-       l       %r3,100(%r15)
-       basr    %r14,%r4
-       st      %r2,100(%r15)
-1:
+# The bras instruction gets runtime patched to call prepare_ftrace_return.
+# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+#      bras    %r14,prepare_ftrace_return
+       bras    %r14,0f
+0:     st      %r2,100(%r15)
 #endif
        ahi     %r15,96
        l       %r14,56(%r15)
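In the replacement scheme, ftrace_graph_caller now starts with a bras whose 16-bit immediate is patched at runtime. s390 relative branches count halfwords, which is why ftrace_enable_ftrace_graph_caller above divides the byte distance to prepare_ftrace_return by two, and why the disable value 0x0002 (two halfwords, i.e. the 4-byte bras itself) branches straight to the following instruction. A rough sketch of that arithmetic with made-up addresses, for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical addresses, not taken from a real kernel image. */
        uint64_t graph_caller = 0x0000000000412340ULL; /* ftrace_graph_caller   */
        uint64_t prepare_ret  = 0x0000000000418a60ULL; /* prepare_ftrace_return */

        /* enable: encode the real target as a halfword count */
        uint16_t enable_off  = (uint16_t)((prepare_ret - graph_caller) / 2);
        /* disable: branch over the 4-byte bras to the next instruction */
        uint16_t disable_off = 0x0002;

        printf("enable immediate:  0x%04x (target %lu bytes ahead)\n",
               enable_off, (unsigned long)(prepare_ret - graph_caller));
        printf("disable immediate: 0x%04x (branch to the next instruction)\n",
               disable_off);
        return 0;
}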
arch/s390/kernel/mcount64.S
@@ -18,12 +18,6 @@ _mcount:
 #ifdef CONFIG_DYNAMIC_FTRACE
        br      %r14

-       .data
-       .globl  ftrace_dyn_func
-ftrace_dyn_func:
-       .quad   ftrace_stub
-       .previous
-
        .globl ftrace_caller
 ftrace_caller:
 #endif
@@ -37,26 +31,19 @@ ftrace_caller:
        stg     %r1,__SF_BACKCHAIN(%r15)
        lgr     %r2,%r14
        lg      %r3,168(%r15)
-#ifdef CONFIG_DYNAMIC_FTRACE
-       larl    %r14,ftrace_dyn_func
-#else
        larl    %r14,ftrace_trace_function
-#endif
        lg      %r14,0(%r14)
        basr    %r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
+       lg      %r2,168(%r15)
+       lg      %r3,272(%r15)
        .globl  ftrace_graph_caller
 ftrace_graph_caller:
-       # This unconditional branch gets runtime patched. Change only if
-       # you know what you are doing. See ftrace_enable_graph_caller().
-       j       0f
-#endif
-       lg      %r2,272(%r15)
-       lg      %r3,168(%r15)
-       brasl   %r14,prepare_ftrace_return
-       stg     %r2,168(%r15)
-0:
+# The bras instruction gets runtime patched to call prepare_ftrace_return.
+# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+#      bras    %r14,prepare_ftrace_return
+       bras    %r14,0f
+0:     stg     %r2,168(%r15)
 #endif
        aghi    %r15,160
        lmg     %r2,%r5,32(%r15)
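Part of why ftrace_dyn_func and its #ifdef blocks can go away is that ftrace_caller now always calls through the generic ftrace_trace_function pointer, so ftrace_update_ftrace_func no longer needs to copy the tracer into an architecture-private variable. Roughly, the larl/lg/basr sequence above corresponds to the following C; the stub and wrapper are stand-ins for illustration, not symbols from the patch:

/* Illustration only: the indirect call made by ftrace_caller. */
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

static void ftrace_stub(unsigned long ip, unsigned long parent_ip)
{
        (void) ip;
        (void) parent_ip;
}

/* In the kernel the ftrace core maintains this pointer; here it is a stand-in. */
static ftrace_func_t ftrace_trace_function = ftrace_stub;

static void call_current_tracer(unsigned long ip, unsigned long parent_ip)
{
        ftrace_func_t fn = ftrace_trace_function;       /* lg   %r14,0(%r14) */

        fn(ip, parent_ip);                              /* basr %r14,%r14    */
}

int main(void)
{
        call_current_tracer(0x1000, 0x2000);
        return 0;
}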