Commit 03b30d15 authored Jan 22, 2009 by Ingo Molnar

Merge branch 'tracing/ftrace' into tracing/core

Parents: b43f7093, 3690b5e6

Showing 7 changed files with 280 additions and 82 deletions:
  Documentation/ftrace.txt           +74   -0
  arch/x86/kernel/ds.c               +17  -14
  arch/x86/kernel/dumpstack.c         +6   -0
  include/linux/ftrace.h             +13   -0
  kernel/trace/trace.h                +0   -1
  kernel/trace/trace_hw_branches.c  +141  -32
  kernel/trace/trace_workqueue.c     +29  -35
Documentation/ftrace.txt

@@ -165,6 +165,8 @@ Here is the list of current tracers that may be configured.
   nop - This is not a tracer. To remove all tracers from tracing
         simply echo "nop" into current_tracer.
 
+  hw-branch-tracer - traces branches on all cpu's in a circular buffer.
+
 Examples of using the tracer
 ----------------------------
 
@@ -1152,6 +1154,78 @@ int main (int argc, char **argv)
         return 0;
 }
 
+
+hw-branch-tracer (x86 only)
+---------------------------
+
+This tracer uses the x86 last branch tracing hardware feature to
+collect a branch trace on all cpus with relatively low overhead.
+
+The tracer uses a fixed-size circular buffer per cpu and only
+traces ring 0 branches. The trace file dumps that buffer in the
+following format:
+
+# tracer: hw-branch-tracer
+#
+# CPU#        TO  <-  FROM
+   0  scheduler_tick+0xb5/0x1bf        <-  task_tick_idle+0x5/0x6
+   2  run_posix_cpu_timers+0x2b/0x72a  <-  run_posix_cpu_timers+0x25/0x72a
+   0  scheduler_tick+0x139/0x1bf       <-  scheduler_tick+0xed/0x1bf
+   0  scheduler_tick+0x17c/0x1bf       <-  scheduler_tick+0x148/0x1bf
+   2  run_posix_cpu_timers+0x9e/0x72a  <-  run_posix_cpu_timers+0x5e/0x72a
+   0  scheduler_tick+0x1b6/0x1bf       <-  scheduler_tick+0x1aa/0x1bf
+
+
+The tracer may be used to dump the trace for the oops'ing cpu on a
+kernel oops into the system log. To enable this, ftrace_dump_on_oops
+must be set. To set ftrace_dump_on_oops, one can either use the sysctl
+function or set it via the proc system interface.
+
+  sysctl kernel.ftrace_dump_on_oops=1
+
+or
+
+  echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
+
+
+Here's an example of such a dump after a null pointer dereference in a
+kernel module:
+
+[57848.105921] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
+[57848.106019] IP: [<ffffffffa0000006>] open+0x6/0x14 [oops]
+[57848.106019] PGD 2354e9067 PUD 2375e7067 PMD 0
+[57848.106019] Oops: 0002 [#1] SMP
+[57848.106019] last sysfs file: /sys/devices/pci0000:00/0000:00:1e.0/0000:20:05.0/local_cpus
+[57848.106019] Dumping ftrace buffer:
+[57848.106019] ---------------------------------
+[...]
+[57848.106019]    0  chrdev_open+0xe6/0x165 <- cdev_put+0x23/0x24
+[57848.106019]    0  chrdev_open+0x117/0x165 <- chrdev_open+0xfa/0x165
+[57848.106019]    0  chrdev_open+0x120/0x165 <- chrdev_open+0x11c/0x165
+[57848.106019]    0  chrdev_open+0x134/0x165 <- chrdev_open+0x12b/0x165
+[57848.106019]    0  open+0x0/0x14 [oops] <- chrdev_open+0x144/0x165
+[57848.106019]    0  page_fault+0x0/0x30 <- open+0x6/0x14 [oops]
+[57848.106019]    0  error_entry+0x0/0x5b <- page_fault+0x4/0x30
+[57848.106019]    0  error_kernelspace+0x0/0x31 <- error_entry+0x59/0x5b
+[57848.106019]    0  error_sti+0x0/0x1 <- error_kernelspace+0x2d/0x31
+[57848.106019]    0  page_fault+0x9/0x30 <- error_sti+0x0/0x1
+[57848.106019]    0  do_page_fault+0x0/0x881 <- page_fault+0x1a/0x30
+[...]
+[57848.106019]    0  do_page_fault+0x66b/0x881 <- is_prefetch+0x1ee/0x1f2
+[57848.106019]    0  do_page_fault+0x6e0/0x881 <- do_page_fault+0x67a/0x881
+[57848.106019]    0  oops_begin+0x0/0x96 <- do_page_fault+0x6e0/0x881
+[57848.106019]    0  trace_hw_branch_oops+0x0/0x2d <- oops_begin+0x9/0x96
+[...]
+[57848.106019]    0  ds_suspend_bts+0x2a/0xe3 <- ds_suspend_bts+0x1a/0xe3
+[57848.106019] ---------------------------------
+[57848.106019] CPU 0
+[57848.106019] Modules linked in: oops
+[57848.106019] Pid: 5542, comm: cat Tainted: G W 2.6.28 #23
+[57848.106019] RIP: 0010:[<ffffffffa0000006>] [<ffffffffa0000006>] open+0x6/0x14 [oops]
+[57848.106019] RSP: 0018:ffff880235457d48 EFLAGS: 00010246
+[...]
+
 dynamic ftrace
 --------------
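The dump above was produced by a deliberately broken test module. Judging from the call chain in the log (chrdev_open -> open+0x6/0x14 [oops] -> page_fault), it was a character device whose open handler writes through a NULL pointer. A minimal sketch of such a module follows; the "oops" module name is from the log's "Modules linked in:" line, but the registration details are assumptions for illustration, not taken from the commit.

/* Hypothetical reconstruction of the "oops" test module: a minimal
 * character device whose open handler dereferences NULL, so that
 * cat'ing the device node produces the oops (and, with
 * ftrace_dump_on_oops set, the branch-trace dump) shown above.
 */
#include <linux/module.h>
#include <linux/fs.h>

static int oops_open(struct inode *inode, struct file *file)
{
        *(int *)0 = 0;          /* deliberate NULL pointer write (Oops: 0002) */
        return 0;
}

static const struct file_operations oops_fops = {
        .owner  = THIS_MODULE,
        .open   = oops_open,
};

static int __init oops_init(void)
{
        /* dynamic major; mknod the device node by hand, then cat it */
        return register_chrdev(0, "oops", &oops_fops);
}
module_init(oops_init);
MODULE_LICENSE("GPL");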
arch/x86/kernel/ds.c

@@ -15,8 +15,8 @@
  * - buffer allocation (memory accounting)
  *
  *
- * Copyright (C) 2007-2008 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
+ * Copyright (C) 2007-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
  */
 
@@ -890,7 +890,7 @@ int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
 }
 
 static const struct ds_configuration ds_cfg_netburst = {
-        .name = "netburst",
+        .name = "Netburst",
         .ctl[dsf_bts]           = (1 << 2) | (1 << 3),
         .ctl[dsf_bts_kernel]    = (1 << 5),
         .ctl[dsf_bts_user]      = (1 << 6),
@@ -904,7 +904,7 @@ static const struct ds_configuration ds_cfg_netburst = {
 #endif
 };
 static const struct ds_configuration ds_cfg_pentium_m = {
-        .name = "pentium m",
+        .name = "Pentium M",
         .ctl[dsf_bts]           = (1 << 6) | (1 << 7),
 
         .sizeof_field           = sizeof(long),
@@ -915,8 +915,8 @@ static const struct ds_configuration ds_cfg_pentium_m = {
         .sizeof_rec[ds_pebs]    = sizeof(long) * 18,
 #endif
 };
-static const struct ds_configuration ds_cfg_core2 = {
-        .name = "core 2",
+static const struct ds_configuration ds_cfg_core2_atom = {
+        .name = "Core 2/Atom",
         .ctl[dsf_bts]           = (1 << 6) | (1 << 7),
         .ctl[dsf_bts_kernel]    = (1 << 9),
         .ctl[dsf_bts_user]      = (1 << 10),
@@ -949,19 +949,22 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
         switch (c->x86) {
         case 0x6:
                 switch (c->x86_model) {
-                case 0 ... 0xC:
-                        /* sorry, don't know about them */
-                        break;
-                case 0xD:
-                case 0xE: /* Pentium M */
+                case 0x9:
+                case 0xd: /* Pentium M */
                         ds_configure(&ds_cfg_pentium_m);
                         break;
-                default: /* Core2, Atom, ... */
-                        ds_configure(&ds_cfg_core2);
+                case 0xf:
+                case 0x17: /* Core2 */
+                case 0x1c: /* Atom */
+                        ds_configure(&ds_cfg_core2_atom);
+                        break;
+                case 0x1a: /* i7 */
+                default:
+                        /* sorry, don't know about them */
                         break;
                 }
                 break;
-        case 0xF:
+        case 0xf:
                 switch (c->x86_model) {
                 case 0x0:
                 case 0x1:
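For reference, the family/model numbers this whitelist switches on come straight from CPUID leaf 1, decoded the same way the kernel fills c->x86 and c->x86_model. A small user-space sketch that prints them for the running CPU, assuming GCC's cpuid.h helper is available:

#include <stdio.h>
#include <cpuid.h>      /* GCC helper; an assumption of this sketch */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        unsigned int family = (eax >> 8) & 0xf;
        unsigned int model  = (eax >> 4) & 0xf;

        if (family == 0xf)
                family += (eax >> 20) & 0xff;           /* extended family */
        if (family == 0x6 || family == 0xf)
                model |= ((eax >> 16) & 0xf) << 4;      /* extended model */

        /* compare against the cases in ds_init_intel(), e.g. 6/0x17 = Core2 */
        printf("family 0x%x, model 0x%x\n", family, model);
        return 0;
}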
arch/x86/kernel/dumpstack.c

@@ -14,6 +14,7 @@
 #include <linux/bug.h>
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
+#include <linux/ftrace.h>
 
 #include <asm/stacktrace.h>
@@ -195,6 +196,11 @@ unsigned __kprobes long oops_begin(void)
         int cpu;
         unsigned long flags;
 
+        /* notify the hw-branch tracer so it may disable tracing and
+           add the last trace to the trace buffer -
+           the earlier this happens, the more useful the trace. */
+        trace_hw_branch_oops();
+
         oops_enter();
 
         /* racy, but better than risking deadlock. */
include/linux/ftrace.h

@@ -496,4 +496,17 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 
 #endif /* CONFIG_TRACING */
 
+
+#ifdef CONFIG_HW_BRANCH_TRACER
+
+void trace_hw_branch(u64 from, u64 to);
+void trace_hw_branch_oops(void);
+
+#else /* CONFIG_HW_BRANCH_TRACER */
+
+static inline void trace_hw_branch(u64 from, u64 to) {}
+static inline void trace_hw_branch_oops(void) {}
+
+#endif /* CONFIG_HW_BRANCH_TRACER */
+
 #endif /* _LINUX_FTRACE_H */
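This is the usual kernel idiom for optional hooks: the header exposes a real declaration when the feature is configured in and an empty static inline otherwise, so call sites such as oops_begin() stay free of #ifdefs and the disabled hook compiles away entirely. A self-contained sketch of the same pattern, with CONFIG_MY_FEATURE and my_hook() as made-up names:

#include <stdio.h>

#ifdef CONFIG_MY_FEATURE
void my_hook(void) { printf("feature hook ran\n"); }
#else
static inline void my_hook(void) {}     /* optimizes to nothing */
#endif

int main(void)
{
        my_hook();      /* no #ifdef needed at the call site */
        return 0;
}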
kernel/trace/trace.h

@@ -438,7 +438,6 @@ void trace_function(struct trace_array *tr,
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
kernel/trace/trace_hw_branches.c

 /*
  * h/w branch tracer for x86 based on bts
  *
- * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
- *
+ * Copyright (C) 2008-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
  */
@@ -10,6 +11,9 @@
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
 #include <linux/kallsyms.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
 
 #include <asm/ds.h>
@@ -19,13 +23,32 @@
 #define SIZEOF_BTS (1 << 13)
 
+/* The tracer mutex protects the below per-cpu tracer array.
+   It needs to be held to:
+   - start tracing on all cpus
+   - stop tracing on all cpus
+   - start tracing on a single hotplug cpu
+   - stop tracing on a single hotplug cpu
+   - read the trace from all cpus
+   - read the trace from a single cpu
+*/
+static DEFINE_MUTEX(bts_tracer_mutex);
+
 static DEFINE_PER_CPU(struct bts_tracer *, tracer);
 static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
 
 #define this_tracer per_cpu(tracer, smp_processor_id())
 #define this_buffer per_cpu(buffer, smp_processor_id())
 
-static int trace_hw_branches_enabled;
+static int __read_mostly trace_hw_branches_enabled;
+static struct trace_array *hw_branch_trace __read_mostly;
 
+
+/*
+ * Start tracing on the current cpu.
+ * The argument is ignored.
+ *
+ * pre: bts_tracer_mutex must be locked.
+ */
 static void bts_trace_start_cpu(void *arg)
 {
         if (this_tracer)
@@ -43,14 +66,20 @@ static void bts_trace_start_cpu(void *arg)
 
 static void bts_trace_start(struct trace_array *tr)
 {
-        int cpu;
+        mutex_lock(&bts_tracer_mutex);
 
-        tracing_reset_online_cpus(tr);
+        on_each_cpu(bts_trace_start_cpu, NULL, 1);
+        trace_hw_branches_enabled = 1;
 
-        for_each_cpu(cpu, cpu_possible_mask)
-                smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
-        trace_hw_branches_enabled = 1;
+        mutex_unlock(&bts_tracer_mutex);
 }
 
+/*
+ * Stop tracing on the current cpu.
+ * The argument is ignored.
+ *
+ * pre: bts_tracer_mutex must be locked.
+ */
 static void bts_trace_stop_cpu(void *arg)
 {
         if (this_tracer) {
@@ -61,26 +90,63 @@ static void bts_trace_stop_cpu(void *arg)
 
 static void bts_trace_stop(struct trace_array *tr)
 {
-        int cpu;
+        mutex_lock(&bts_tracer_mutex);
 
         trace_hw_branches_enabled = 0;
-        for_each_cpu(cpu, cpu_possible_mask)
-                smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
+        on_each_cpu(bts_trace_stop_cpu, NULL, 1);
+
+        mutex_unlock(&bts_tracer_mutex);
+}
+
+static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
+                                        unsigned long action, void *hcpu)
+{
+        unsigned int cpu = (unsigned long)hcpu;
+
+        mutex_lock(&bts_tracer_mutex);
+
+        if (!trace_hw_branches_enabled)
+                goto out;
+
+        switch (action) {
+        case CPU_ONLINE:
+        case CPU_DOWN_FAILED:
+                smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+                break;
+        case CPU_DOWN_PREPARE:
+                smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
+                break;
+        }
+
+ out:
+        mutex_unlock(&bts_tracer_mutex);
+        return NOTIFY_DONE;
 }
 
+static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
+        .notifier_call = bts_hotcpu_handler
+};
+
 static int bts_trace_init(struct trace_array *tr)
 {
+        hw_branch_trace = tr;
+
+        register_hotcpu_notifier(&bts_hotcpu_notifier);
         tracing_reset_online_cpus(tr);
         bts_trace_start(tr);
 
         return 0;
 }
 
+static void bts_trace_reset(struct trace_array *tr)
+{
+        bts_trace_stop(tr);
+        unregister_hotcpu_notifier(&bts_hotcpu_notifier);
+}
+
 static void bts_trace_print_header(struct seq_file *m)
 {
-        seq_puts(m,
-                 "# CPU#        FROM                   TO         FUNCTION\n");
-        seq_puts(m,
-                 "#  |           |                     |             |\n");
+        seq_puts(m, "# CPU#        TO  <-  FROM\n");
 }
 
 static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
@@ -88,15 +154,15 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
         struct trace_entry *entry = iter->ent;
         struct trace_seq *seq = &iter->seq;
         struct hw_branch_entry *it;
+        unsigned long symflags = TRACE_ITER_SYM_OFFSET;
 
         trace_assign_type(it, entry);
 
         if (entry->type == TRACE_HW_BRANCHES) {
                 if (trace_seq_printf(seq, "%4d  ", entry->cpu) &&
-                    trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
-                                     it->from, it->to) &&
-                    (!it->from ||
-                     seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
+                    seq_print_ip_sym(seq, it->to, symflags) &&
+                    trace_seq_printf(seq, "\t  <-  ") &&
+                    seq_print_ip_sym(seq, it->from, symflags) &&
                     trace_seq_printf(seq, "\n"))
                         return TRACE_TYPE_HANDLED;
                 return TRACE_TYPE_PARTIAL_LINE;;
@@ -104,26 +170,42 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
         return TRACE_TYPE_UNHANDLED;
 }
 
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
+void trace_hw_branch(u64 from, u64 to)
 {
+        struct trace_array *tr = hw_branch_trace;
         struct ring_buffer_event *event;
         struct hw_branch_entry *entry;
-        unsigned long irq;
+        unsigned long irq1, irq2;
+        int cpu;
 
-        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
-        if (!event)
+        if (unlikely(!tr))
                 return;
+
+        if (unlikely(!trace_hw_branches_enabled))
+                return;
+
+        local_irq_save(irq1);
+        cpu = raw_smp_processor_id();
+        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+                goto out;
+
+        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2);
+        if (!event)
+                goto out;
         entry = ring_buffer_event_data(event);
         tracing_generic_entry_update(&entry->ent, 0, from);
         entry->ent.type = TRACE_HW_BRANCHES;
-        entry->ent.cpu = smp_processor_id();
+        entry->ent.cpu = cpu;
         entry->from = from;
         entry->to = to;
-        ring_buffer_unlock_commit(tr->buffer, event, irq);
+        ring_buffer_unlock_commit(tr->buffer, event, irq2);
+
+ out:
+        atomic_dec(&tr->data[cpu]->disabled);
+        local_irq_restore(irq1);
 }
 
-static void trace_bts_at(struct trace_array *tr,
-                         const struct bts_trace *trace, void *at)
+static void trace_bts_at(const struct bts_trace *trace, void *at)
 {
         struct bts_struct bts;
         int err = 0;
@@ -138,18 +220,29 @@ static void trace_bts_at(struct trace_array *tr,
         switch (bts.qualifier) {
         case BTS_BRANCH:
-                trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
+                trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
                 break;
         }
 }
 
+/*
+ * Collect the trace on the current cpu and write it into the ftrace buffer.
+ *
+ * pre: bts_tracer_mutex must be locked
+ */
 static void trace_bts_cpu(void *arg)
 {
         struct trace_array *tr = (struct trace_array *)arg;
         const struct bts_trace *trace;
         unsigned char *at;
 
-        if (!this_tracer)
+        if (unlikely(!tr))
+                return;
+
+        if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
+                return;
+
+        if (unlikely(!this_tracer))
                 return;
 
         ds_suspend_bts(this_tracer);
@@ -159,11 +252,11 @@ static void trace_bts_cpu(void *arg)
         for (at = trace->ds.top; (void *)at < trace->ds.end;
              at += trace->ds.size)
-                trace_bts_at(tr, trace, at);
+                trace_bts_at(trace, at);
 
         for (at = trace->ds.begin; (void *)at < trace->ds.top;
              at += trace->ds.size)
-                trace_bts_at(tr, trace, at);
+                trace_bts_at(trace, at);
 
 out:
         ds_resume_bts(this_tracer);
@@ -171,22 +264,38 @@ static void trace_bts_cpu(void *arg)
 
 static void trace_bts_prepare(struct trace_iterator *iter)
 {
-        int cpu;
+        mutex_lock(&bts_tracer_mutex);
+
+        on_each_cpu(trace_bts_cpu, iter->tr, 1);
+
+        mutex_unlock(&bts_tracer_mutex);
+}
+
+static void trace_bts_close(struct trace_iterator *iter)
+{
+        tracing_reset_online_cpus(iter->tr);
+}
+
+void trace_hw_branch_oops(void)
+{
+        mutex_lock(&bts_tracer_mutex);
+
+        trace_bts_cpu(hw_branch_trace);
 
-        for_each_cpu(cpu, cpu_possible_mask)
-                smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
+        mutex_unlock(&bts_tracer_mutex);
 }
 
 struct tracer bts_tracer __read_mostly =
 {
         .name           = "hw-branch-tracer",
         .init           = bts_trace_init,
-        .reset          = bts_trace_stop,
+        .reset          = bts_trace_reset,
         .print_header   = bts_trace_print_header,
         .print_line     = bts_trace_print_line,
         .start          = bts_trace_start,
         .stop           = bts_trace_stop,
-        .open           = trace_bts_prepare
+        .open           = trace_bts_prepare,
+        .close          = trace_bts_close
 };
 
 __init static int init_bts_trace(void)
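The hotplug handling added here follows a reusable pattern: serialize global start/stop and per-cpu arming behind one mutex, and arm or disarm an incoming/outgoing cpu from the notifier with smp_call_function_single(). A stripped-down sketch of the same pattern against ~2.6.29-era APIs; the my_* names are placeholders, not from the patch:

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

static DEFINE_MUTEX(my_mutex);
static int my_enabled;

static void my_start_cpu(void *arg) { /* arm a per-cpu resource here */ }
static void my_stop_cpu(void *arg)  { /* tear it down here */ }

static int my_hotcpu(struct notifier_block *nb, unsigned long action,
                     void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        mutex_lock(&my_mutex);
        if (my_enabled) {
                switch (action) {
                case CPU_ONLINE:
                case CPU_DOWN_FAILED:
                        /* cpu came up (or failed to go down): arm it */
                        smp_call_function_single(cpu, my_start_cpu, NULL, 1);
                        break;
                case CPU_DOWN_PREPARE:
                        /* cpu going away: disarm before it disappears */
                        smp_call_function_single(cpu, my_stop_cpu, NULL, 1);
                        break;
                }
        }
        mutex_unlock(&my_mutex);
        return NOTIFY_DONE;
}

static struct notifier_block my_nb = { .notifier_call = my_hotcpu };

static int __init my_init(void)
{
        int err = register_hotcpu_notifier(&my_nb);
        if (err)
                return err;

        mutex_lock(&my_mutex);
        on_each_cpu(my_start_cpu, NULL, 1);
        my_enabled = 1;
        mutex_unlock(&my_mutex);
        return 0;
}

static void __exit my_exit(void)
{
        unregister_hotcpu_notifier(&my_nb);
        mutex_lock(&my_mutex);
        my_enabled = 0;
        on_each_cpu(my_stop_cpu, NULL, 1);
        mutex_unlock(&my_mutex);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");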
kernel/trace/trace_workqueue.c

@@ -8,6 +8,7 @@
 #include <trace/workqueue.h>
 #include <linux/list.h>
+#include <linux/percpu.h>
 #include "trace_stat.h"
 #include "trace.h"
@@ -37,7 +38,8 @@ struct workqueue_global_stats {
 /* Don't need a global lock because allocated before the workqueues, and
  * never freed.
  */
-static struct workqueue_global_stats *all_workqueue_stat;
+static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
+#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
 
 /* Insertion of a work */
 static void
@@ -48,8 +50,8 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
         struct cpu_workqueue_stats *node, *next;
         unsigned long flags;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                                         list) {
                 if (node->pid == wq_thread->pid) {
                         atomic_inc(&node->inserted);
@@ -58,7 +60,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
         }
         pr_debug("trace_workqueue: entry not found\n");
 found:
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Execution of a work */
@@ -70,8 +72,8 @@ probe_workqueue_execution(struct task_struct *wq_thread,
         struct cpu_workqueue_stats *node, *next;
         unsigned long flags;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                                         list) {
                 if (node->pid == wq_thread->pid) {
                         node->executed++;
@@ -80,7 +82,7 @@ probe_workqueue_execution(struct task_struct *wq_thread,
         }
         pr_debug("trace_workqueue: entry not found\n");
 found:
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Creation of a cpu workqueue thread */
@@ -104,11 +106,11 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
         cws->pid = wq_thread->pid;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        if (list_empty(&all_workqueue_stat[cpu].list))
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (list_empty(&workqueue_cpu_stat(cpu)->list))
                 cws->first_entry = true;
-        list_add_tail(&cws->list, &all_workqueue_stat[cpu].list);
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Destruction of a cpu workqueue thread */
@@ -119,8 +121,8 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
         struct cpu_workqueue_stats *node, *next;
         unsigned long flags;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                                         list) {
                 if (node->pid == wq_thread->pid) {
                         list_del(&node->list);
@@ -131,7 +133,7 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
         pr_debug("trace_workqueue: don't find workqueue to destroy\n");
 found:
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
@@ -141,13 +143,13 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
         struct cpu_workqueue_stats *ret = NULL;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
 
-        if (!list_empty(&all_workqueue_stat[cpu].list))
-                ret = list_entry(all_workqueue_stat[cpu].list.next,
+        if (!list_empty(&workqueue_cpu_stat(cpu)->list))
+                ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
                                  struct cpu_workqueue_stats, list);
 
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
         return ret;
 }
@@ -172,9 +174,9 @@ static void *workqueue_stat_next(void *prev, int idx)
         unsigned long flags;
         void *ret = NULL;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        if (list_is_last(&prev_cws->list, &all_workqueue_stat[cpu].list)) {
-                spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
+                spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
                 for (++cpu ; cpu < num_possible_cpus(); cpu++) {
                         ret = workqueue_stat_start_cpu(cpu);
                         if (ret)
@@ -182,7 +184,7 @@ static void *workqueue_stat_next(void *prev, int idx)
                 }
                 return NULL;
         }
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
         return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
                           list);
@@ -199,10 +201,10 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
                    cws->executed,
                    trace_find_cmdline(cws->pid));
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        if (&cws->list == all_workqueue_stat[cpu].list.next)
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
                 seq_printf(s, "\n");
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
         return 0;
 }
@@ -258,17 +260,9 @@ int __init trace_workqueue_early_init(void)
         if (ret)
                 goto no_creation;
 
-        all_workqueue_stat = kmalloc(sizeof(struct workqueue_global_stats)
-                                     * num_possible_cpus(), GFP_KERNEL);
-        if (!all_workqueue_stat) {
-                pr_warning("trace_workqueue: not enough memory\n");
-                goto no_creation;
-        }
-
         for_each_possible_cpu(cpu) {
-                spin_lock_init(&all_workqueue_stat[cpu].lock);
-                INIT_LIST_HEAD(&all_workqueue_stat[cpu].list);
+                spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+                INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
         }
 
         return 0;
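The whole conversion boils down to one move: a kmalloc'd array indexed by cpu number becomes static per-cpu data, which drops the allocation and its failure path entirely and hides the addressing behind an accessor macro. Reduced to its skeleton (the my_stats names are illustrative, not kernel code):

#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct my_stats {
        spinlock_t              lock;
        struct list_head        list;
};

/* before: struct my_stats *all_stats;  (kmalloc * num_possible_cpus()) */
static DEFINE_PER_CPU(struct my_stats, my_stats);
#define my_cpu_stat(cpu)        (&per_cpu(my_stats, cpu))

static int __init my_early_init(void)
{
        int cpu;

        /* no allocation step; just initialize each cpu's slot in place */
        for_each_possible_cpu(cpu) {
                spin_lock_init(&my_cpu_stat(cpu)->lock);
                INIT_LIST_HEAD(&my_cpu_stat(cpu)->list);
        }
        return 0;
}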