Commit 96fa2b50 authored by Linus Torvalds

Merge branch 'tracing-core-for-linus' of...

Merge branch 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (40 commits)
  tracing: Separate raw syscall from syscall tracer
  ring-buffer-benchmark: Add parameters to set produce/consumer priorities
  tracing, function tracer: Clean up strstrip() usage
  ring-buffer benchmark: Run producer/consumer threads at nice +19
  tracing: Remove the stale include/trace/power.h
  tracing: Only print objcopy version warning once from recordmcount
  tracing: Prevent build warning: 'ftrace_graph_buf' defined but not used
  ring-buffer: Move access to commit_page up into function used
  tracing: do not disable interrupts for trace_clock_local
  ring-buffer: Add multiple iterations between benchmark timestamps
  kprobes: Sanitize struct kretprobe_instance allocations
  tracing: Fix to use __always_unused attribute
  compiler: Introduce __always_unused
  tracing: Exit with error if a weak function is used in recordmcount.pl
  tracing: Move conditional into update_funcs() in recordmcount.pl
  tracing: Add regex for weak functions in recordmcount.pl
  tracing: Move mcount section search to front of loop in recordmcount.pl
  tracing: Fix objcopy revision check in recordmcount.pl
  tracing: Check absolute path of input file in recordmcount.pl
  tracing: Correct the check for number of arguments in recordmcount.pl
  ...
parents 7a797cdc b8007ef7
...@@ -778,6 +778,13 @@ and is between 256 and 4096 characters. It is defined in the file ...@@ -778,6 +778,13 @@ and is between 256 and 4096 characters. It is defined in the file
by the set_ftrace_notrace file in the debugfs by the set_ftrace_notrace file in the debugfs
tracing directory. tracing directory.
ftrace_graph_filter=[function-list]
[FTRACE] Limit the top level caller functions traced
by the function graph tracer at boot up.
function-list is a comma-separated list of functions
that can be changed at run time by the
set_graph_function file in the debugfs tracing directory.
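For example, booting with
ftrace_graph_filter=schedule,do_sys_open
(function names here are purely illustrative) limits the
graph tracer to those functions and whatever they call.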
gamecon.map[2|3]= gamecon.map[2|3]=
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
support via parallel port (up to 5 devices per port) support via parallel port (up to 5 devices per port)
......
...@@ -213,10 +213,19 @@ If you can't trace NMI functions, then skip this option. ...@@ -213,10 +213,19 @@ If you can't trace NMI functions, then skip this option.
<details to be filled> <details to be filled>
HAVE_FTRACE_SYSCALLS HAVE_SYSCALL_TRACEPOINTS
--------------------- ---------------------
<details to be filled> You need very few things to get syscall tracing working in an arch.
- Have a NR_syscalls variable in <asm/unistd.h> that provides the number
of syscalls supported by the arch.
- Implement arch_syscall_addr() to resolve a syscall address from a
syscall number (a minimal sketch follows this list).
- Support the TIF_SYSCALL_TRACEPOINT thread flag.
- Call the trace_sys_enter() and trace_sys_exit() tracepoints from the
ptrace syscall tracing path.
- Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
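As a rough illustration only (the exact type of sys_call_table and whether
__init applies vary by architecture), arch_syscall_addr() can be as small as:

	extern const unsigned long sys_call_table[];

	unsigned long __init arch_syscall_addr(int nr)
	{
		return sys_call_table[nr];
	}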
HAVE_FTRACE_MCOUNT_RECORD HAVE_FTRACE_MCOUNT_RECORD
......
...@@ -379,6 +379,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc ...@@ -379,6 +379,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
PHONY += scripts_basic PHONY += scripts_basic
scripts_basic: scripts_basic:
$(Q)$(MAKE) $(build)=scripts/basic $(Q)$(MAKE) $(build)=scripts/basic
$(Q)rm -f .tmp_quiet_recordmcount
# To avoid any implicit rule to kick in, define an empty command. # To avoid any implicit rule to kick in, define an empty command.
scripts/basic/%: scripts_basic ; scripts/basic/%: scripts_basic ;
......
...@@ -203,73 +203,10 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) ...@@ -203,73 +203,10 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
#ifdef CONFIG_FTRACE_SYSCALLS #ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned int sys_call_table[]; extern unsigned int sys_call_table[];
static struct syscall_metadata **syscalls_metadata; unsigned long __init arch_syscall_addr(int nr)
struct syscall_metadata *syscall_nr_to_meta(int nr)
{
if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
return NULL;
return syscalls_metadata[nr];
}
int syscall_name_to_nr(char *name)
{
int i;
if (!syscalls_metadata)
return -1;
for (i = 0; i < NR_syscalls; i++)
if (syscalls_metadata[i])
if (!strcmp(syscalls_metadata[i]->name, name))
return i;
return -1;
}
void set_syscall_enter_id(int num, int id)
{
syscalls_metadata[num]->enter_id = id;
}
void set_syscall_exit_id(int num, int id)
{ {
syscalls_metadata[num]->exit_id = id; return (unsigned long)sys_call_table[nr];
}
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
struct syscall_metadata *start;
struct syscall_metadata *stop;
char str[KSYM_SYMBOL_LEN];
start = (struct syscall_metadata *)__start_syscalls_metadata;
stop = (struct syscall_metadata *)__stop_syscalls_metadata;
kallsyms_lookup(syscall, NULL, NULL, NULL, str);
for ( ; start < stop; start++) {
if (start->name && !strcmp(start->name + 3, str + 3))
return start;
}
return NULL;
}
static int __init arch_init_ftrace_syscalls(void)
{
struct syscall_metadata *meta;
int i;
syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
GFP_KERNEL);
if (!syscalls_metadata)
return -ENOMEM;
for (i = 0; i < NR_syscalls; i++) {
meta = find_syscall_meta((unsigned long)sys_call_table[i]);
syscalls_metadata[i] = meta;
}
return 0;
} }
arch_initcall(arch_init_ftrace_syscalls);
#endif #endif
...@@ -1185,17 +1185,14 @@ END(ftrace_graph_caller) ...@@ -1185,17 +1185,14 @@ END(ftrace_graph_caller)
.globl return_to_handler .globl return_to_handler
return_to_handler: return_to_handler:
pushl $0
pushl %eax pushl %eax
pushl %ecx
pushl %edx pushl %edx
movl %ebp, %eax movl %ebp, %eax
call ftrace_return_to_handler call ftrace_return_to_handler
movl %eax, 0xc(%esp) movl %eax, %ecx
popl %edx popl %edx
popl %ecx
popl %eax popl %eax
ret jmp *%ecx
#endif #endif
.section .rodata,"a" .section .rodata,"a"
......
...@@ -155,11 +155,11 @@ GLOBAL(return_to_handler) ...@@ -155,11 +155,11 @@ GLOBAL(return_to_handler)
call ftrace_return_to_handler call ftrace_return_to_handler
movq %rax, 16(%rsp) movq %rax, %rdi
movq 8(%rsp), %rdx movq 8(%rsp), %rdx
movq (%rsp), %rax movq (%rsp), %rax
addq $16, %rsp addq $24, %rsp
retq jmp *%rdi
#endif #endif
......
...@@ -9,6 +9,8 @@ ...@@ -9,6 +9,8 @@
* the dangers of modifying code on the run. * the dangers of modifying code on the run.
*/ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
...@@ -336,15 +338,15 @@ int __init ftrace_dyn_arch_init(void *data) ...@@ -336,15 +338,15 @@ int __init ftrace_dyn_arch_init(void *data)
switch (faulted) { switch (faulted) {
case 0: case 0:
pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); pr_info("converting mcount calls to 0f 1f 44 00 00\n");
memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
break; break;
case 1: case 1:
pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); pr_info("converting mcount calls to 66 66 66 66 90\n");
memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
break; break;
case 2: case 2:
pr_info("ftrace: converting mcount calls to jmp . + 5\n"); pr_info("converting mcount calls to jmp . + 5\n");
memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
break; break;
} }
...@@ -468,82 +470,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, ...@@ -468,82 +470,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
#ifdef CONFIG_FTRACE_SYSCALLS #ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table; extern unsigned long *sys_call_table;
static struct syscall_metadata **syscalls_metadata; unsigned long __init arch_syscall_addr(int nr)
static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
{
struct syscall_metadata *start;
struct syscall_metadata *stop;
char str[KSYM_SYMBOL_LEN];
start = (struct syscall_metadata *)__start_syscalls_metadata;
stop = (struct syscall_metadata *)__stop_syscalls_metadata;
kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
for ( ; start < stop; start++) {
if (start->name && !strcmp(start->name, str))
return start;
}
return NULL;
}
struct syscall_metadata *syscall_nr_to_meta(int nr)
{
if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
return NULL;
return syscalls_metadata[nr];
}
int syscall_name_to_nr(char *name)
{
int i;
if (!syscalls_metadata)
return -1;
for (i = 0; i < NR_syscalls; i++) {
if (syscalls_metadata[i]) {
if (!strcmp(syscalls_metadata[i]->name, name))
return i;
}
}
return -1;
}
void set_syscall_enter_id(int num, int id)
{
syscalls_metadata[num]->enter_id = id;
}
void set_syscall_exit_id(int num, int id)
{
syscalls_metadata[num]->exit_id = id;
}
static int __init arch_init_ftrace_syscalls(void)
{ {
int i; return (unsigned long)(&sys_call_table)[nr];
struct syscall_metadata *meta;
unsigned long **psys_syscall_table = &sys_call_table;
syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
NR_syscalls, GFP_KERNEL);
if (!syscalls_metadata) {
WARN_ON(1);
return -ENOMEM;
}
for (i = 0; i < NR_syscalls; i++) {
meta = find_syscall_meta(psys_syscall_table[i]);
syscalls_metadata[i] = meta;
}
return 0;
} }
arch_initcall(arch_init_ftrace_syscalls);
#endif #endif
/* /*
* Written by Pekka Paalanen, 2008-2009 <pq@iki.fi> * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
*/ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h> #include <linux/module.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/mmiotrace.h> #include <linux/mmiotrace.h>
#define MODULE_NAME "testmmiotrace"
static unsigned long mmio_address; static unsigned long mmio_address;
module_param(mmio_address, ulong, 0); module_param(mmio_address, ulong, 0);
MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB " MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
...@@ -30,7 +31,7 @@ static unsigned v32(unsigned i) ...@@ -30,7 +31,7 @@ static unsigned v32(unsigned i)
static void do_write_test(void __iomem *p) static void do_write_test(void __iomem *p)
{ {
unsigned int i; unsigned int i;
pr_info(MODULE_NAME ": write test.\n"); pr_info("write test.\n");
mmiotrace_printk("Write test.\n"); mmiotrace_printk("Write test.\n");
for (i = 0; i < 256; i++) for (i = 0; i < 256; i++)
...@@ -47,7 +48,7 @@ static void do_read_test(void __iomem *p) ...@@ -47,7 +48,7 @@ static void do_read_test(void __iomem *p)
{ {
unsigned int i; unsigned int i;
unsigned errs[3] = { 0 }; unsigned errs[3] = { 0 };
pr_info(MODULE_NAME ": read test.\n"); pr_info("read test.\n");
mmiotrace_printk("Read test.\n"); mmiotrace_printk("Read test.\n");
for (i = 0; i < 256; i++) for (i = 0; i < 256; i++)
...@@ -68,7 +69,7 @@ static void do_read_test(void __iomem *p) ...@@ -68,7 +69,7 @@ static void do_read_test(void __iomem *p)
static void do_read_far_test(void __iomem *p) static void do_read_far_test(void __iomem *p)
{ {
pr_info(MODULE_NAME ": read far test.\n"); pr_info("read far test.\n");
mmiotrace_printk("Read far test.\n"); mmiotrace_printk("Read far test.\n");
ioread32(p + read_far); ioread32(p + read_far);
...@@ -78,7 +79,7 @@ static void do_test(unsigned long size) ...@@ -78,7 +79,7 @@ static void do_test(unsigned long size)
{ {
void __iomem *p = ioremap_nocache(mmio_address, size); void __iomem *p = ioremap_nocache(mmio_address, size);
if (!p) { if (!p) {
pr_err(MODULE_NAME ": could not ioremap, aborting.\n"); pr_err("could not ioremap, aborting.\n");
return; return;
} }
mmiotrace_printk("ioremap returned %p.\n", p); mmiotrace_printk("ioremap returned %p.\n", p);
...@@ -94,24 +95,22 @@ static int __init init(void) ...@@ -94,24 +95,22 @@ static int __init init(void)
unsigned long size = (read_far) ? (8 << 20) : (16 << 10); unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
if (mmio_address == 0) { if (mmio_address == 0) {
pr_err(MODULE_NAME ": you have to use the module argument " pr_err("you have to use the module argument mmio_address.\n");
"mmio_address.\n"); pr_err("DO NOT LOAD THIS MODULE UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!\n");
pr_err(MODULE_NAME ": DO NOT LOAD THIS MODULE UNLESS"
" YOU REALLY KNOW WHAT YOU ARE DOING!\n");
return -ENXIO; return -ENXIO;
} }
pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI " pr_warning("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, "
"address space, and writing 16 kB of rubbish in there.\n", "and writing 16 kB of rubbish in there.\n",
size >> 10, mmio_address); size >> 10, mmio_address);
do_test(size); do_test(size);
pr_info(MODULE_NAME ": All done.\n"); pr_info("All done.\n");
return 0; return 0;
} }
static void __exit cleanup(void) static void __exit cleanup(void)
{ {
pr_debug(MODULE_NAME ": unloaded.\n"); pr_debug("unloaded.\n");
} }
module_init(init); module_init(init);
......
...@@ -79,6 +79,7 @@ ...@@ -79,6 +79,7 @@
#define noinline __attribute__((noinline)) #define noinline __attribute__((noinline))
#define __attribute_const__ __attribute__((__const__)) #define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused)) #define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
#define __gcc_header(x) #x #define __gcc_header(x) #x
#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
......
...@@ -218,6 +218,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); ...@@ -218,6 +218,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __maybe_unused /* unimplemented */ # define __maybe_unused /* unimplemented */
#endif #endif
#ifndef __always_unused
# define __always_unused /* unimplemented */
#endif
#ifndef noinline #ifndef noinline
#define noinline #define noinline
#endif #endif
......
...@@ -24,8 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task) ...@@ -24,8 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
return 0; return 0;
} }
extern void __lockfunc lock_kernel(void) __acquires(kernel_lock); extern void __lockfunc
extern void __lockfunc unlock_kernel(void) __releases(kernel_lock); _lock_kernel(const char *func, const char *file, int line)
__acquires(kernel_lock);
extern void __lockfunc
_unlock_kernel(const char *func, const char *file, int line)
__releases(kernel_lock);
#define lock_kernel() do { \
_lock_kernel(__func__, __FILE__, __LINE__); \
} while (0)
#define unlock_kernel() do { \
_unlock_kernel(__func__, __FILE__, __LINE__); \
} while (0)
/* /*
* Various legacy drivers don't really need the BKL in a specific * Various legacy drivers don't really need the BKL in a specific
...@@ -41,8 +54,8 @@ static inline void cycle_kernel_lock(void) ...@@ -41,8 +54,8 @@ static inline void cycle_kernel_lock(void)
#else #else
#define lock_kernel() do { } while(0) #define lock_kernel()
#define unlock_kernel() do { } while(0) #define unlock_kernel()
#define release_kernel_lock(task) do { } while(0) #define release_kernel_lock(task) do { } while(0)
#define cycle_kernel_lock() do { } while(0) #define cycle_kernel_lock() do { } while(0)
#define reacquire_kernel_lock(task) 0 #define reacquire_kernel_lock(task) 0
......
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bkl
#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BKL_H
#include <linux/tracepoint.h>
TRACE_EVENT(lock_kernel,
TP_PROTO(const char *func, const char *file, int line),
TP_ARGS(func, file, line),
TP_STRUCT__entry(
__field( int, lock_depth )
__field_ext( const char *, func, FILTER_PTR_STRING )
__field_ext( const char *, file, FILTER_PTR_STRING )
__field( int, line )
),
TP_fast_assign(
/* We want to record the lock_depth after lock is acquired */
__entry->lock_depth = current->lock_depth + 1;
__entry->func = func;
__entry->file = file;
__entry->line = line;
),
TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth,
__entry->file, __entry->line, __entry->func)
);
TRACE_EVENT(unlock_kernel,
TP_PROTO(const char *func, const char *file, int line),
TP_ARGS(func, file, line),
TP_STRUCT__entry(
__field(int, lock_depth)
__field(const char *, func)
__field(const char *, file)
__field(int, line)
),
TP_fast_assign(
__entry->lock_depth = current->lock_depth;
__entry->func = func;
__entry->file = file;
__entry->line = line;
),
TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth,
__entry->file, __entry->line, __entry->func)
);
#endif /* _TRACE_BKL_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
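Once compiled in, these show up as ordinary trace events; assuming debugfs is
mounted at /sys/kernel/debug, they can be switched on through
/sys/kernel/debug/tracing/events/bkl/lock_kernel/enable and the matching
unlock_kernel file.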
#undef TRACE_SYSTEM #undef TRACE_SYSTEM
#define TRACE_SYSTEM syscalls #define TRACE_SYSTEM raw_syscalls
#define TRACE_INCLUDE_FILE syscalls
#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EVENTS_SYSCALLS_H #define _TRACE_EVENTS_SYSCALLS_H
......
#ifndef _TRACE_POWER_H
#define _TRACE_POWER_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
enum {
POWER_NONE = 0,
POWER_CSTATE = 1,
POWER_PSTATE = 2,
};
struct power_trace {
ktime_t stamp;
ktime_t end;
int type;
int state;
};
DECLARE_TRACE(power_start,
TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
TP_ARGS(it, type, state));
DECLARE_TRACE(power_mark,
TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
TP_ARGS(it, type, state));
DECLARE_TRACE(power_end,
TP_PROTO(struct power_trace *it),
TP_ARGS(it));
#endif /* _TRACE_POWER_H */
...@@ -33,7 +33,7 @@ struct syscall_metadata { ...@@ -33,7 +33,7 @@ struct syscall_metadata {
}; };
#ifdef CONFIG_FTRACE_SYSCALLS #ifdef CONFIG_FTRACE_SYSCALLS
extern struct syscall_metadata *syscall_nr_to_meta(int nr); extern unsigned long arch_syscall_addr(int nr);
extern int syscall_name_to_nr(char *name); extern int syscall_name_to_nr(char *name);
void set_syscall_enter_id(int num, int id); void set_syscall_enter_id(int num, int id);
void set_syscall_exit_id(int num, int id); void set_syscall_exit_id(int num, int id);
......
...@@ -1014,9 +1014,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp) ...@@ -1014,9 +1014,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
/* Pre-allocate memory for max kretprobe instances */ /* Pre-allocate memory for max kretprobe instances */
if (rp->maxactive <= 0) { if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
rp->maxactive = max(10, 2 * NR_CPUS); rp->maxactive = max(10, 2 * num_possible_cpus());
#else #else
rp->maxactive = NR_CPUS; rp->maxactive = num_possible_cpus();
#endif #endif
} }
spin_lock_init(&rp->lock); spin_lock_init(&rp->lock);
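/*
 * Illustration, not part of this patch: a kretprobe user can still size
 * the instance pool explicitly instead of relying on the default chosen
 * above.  The probed symbol and handler names are hypothetical; this
 * assumes <linux/kprobes.h> is included.
 */
static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	return 0;	/* the probed function's return value is in regs */
}

static struct kretprobe my_rp = {
	.kp.symbol_name	= "do_fork",
	.handler	= my_ret_handler,
	.maxactive	= 16,	/* pre-allocate 16 kretprobe_instance slots */
};

/* ... and later: register_kretprobe(&my_rp); */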
......
...@@ -1787,9 +1787,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -1787,9 +1787,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
static struct ring_buffer_event * static struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
unsigned long length, unsigned long tail, unsigned long length, unsigned long tail,
struct buffer_page *commit_page,
struct buffer_page *tail_page, u64 *ts) struct buffer_page *tail_page, u64 *ts)
{ {
struct buffer_page *commit_page = cpu_buffer->commit_page;
struct ring_buffer *buffer = cpu_buffer->buffer; struct ring_buffer *buffer = cpu_buffer->buffer;
struct buffer_page *next_page; struct buffer_page *next_page;
int ret; int ret;
...@@ -1892,13 +1892,10 @@ static struct ring_buffer_event * ...@@ -1892,13 +1892,10 @@ static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
unsigned type, unsigned long length, u64 *ts) unsigned type, unsigned long length, u64 *ts)
{ {
struct buffer_page *tail_page, *commit_page; struct buffer_page *tail_page;
struct ring_buffer_event *event; struct ring_buffer_event *event;
unsigned long tail, write; unsigned long tail, write;
commit_page = cpu_buffer->commit_page;
/* we just need to protect against interrupts */
barrier();
tail_page = cpu_buffer->tail_page; tail_page = cpu_buffer->tail_page;
write = local_add_return(length, &tail_page->write); write = local_add_return(length, &tail_page->write);
...@@ -1909,7 +1906,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -1909,7 +1906,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
/* See if we shot pass the end of this buffer page */ /* See if we shot pass the end of this buffer page */
if (write > BUF_PAGE_SIZE) if (write > BUF_PAGE_SIZE)
return rb_move_tail(cpu_buffer, length, tail, return rb_move_tail(cpu_buffer, length, tail,
commit_page, tail_page, ts); tail_page, ts);
/* We reserved something on the buffer */ /* We reserved something on the buffer */
......
...@@ -35,6 +35,28 @@ static int disable_reader; ...@@ -35,6 +35,28 @@ static int disable_reader;
module_param(disable_reader, uint, 0644); module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer"); MODULE_PARM_DESC(disable_reader, "only run producer");
static int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
static int producer_nice = 19;
static int consumer_nice = 19;
static int producer_fifo = -1;
static int consumer_fifo = -1;
module_param(producer_nice, uint, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");
module_param(consumer_nice, uint, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");
module_param(producer_fifo, uint, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");
module_param(consumer_fifo, uint, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
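/*
 * Illustration, not part of this patch.  Assuming the module is built as
 * ring_buffer_benchmark.ko, the new knobs are set at load time, e.g.:
 *
 *   modprobe ring_buffer_benchmark                        (nice +19 defaults)
 *   modprobe ring_buffer_benchmark producer_fifo=10 consumer_fifo=10
 */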
static int read_events; static int read_events;
static int kill_test; static int kill_test;
...@@ -208,7 +230,9 @@ static void ring_buffer_producer(void) ...@@ -208,7 +230,9 @@ static void ring_buffer_producer(void)
do { do {
struct ring_buffer_event *event; struct ring_buffer_event *event;
int *entry; int *entry;
int i;
for (i = 0; i < write_iteration; i++) {
event = ring_buffer_lock_reserve(buffer, 10); event = ring_buffer_lock_reserve(buffer, 10);
if (!event) { if (!event) {
missed++; missed++;
...@@ -218,6 +242,7 @@ static void ring_buffer_producer(void) ...@@ -218,6 +242,7 @@ static void ring_buffer_producer(void)
*entry = smp_processor_id(); *entry = smp_processor_id();
ring_buffer_unlock_commit(buffer, event); ring_buffer_unlock_commit(buffer, event);
} }
}
do_gettimeofday(&end_tv); do_gettimeofday(&end_tv);
cnt++; cnt++;
...@@ -263,6 +288,27 @@ static void ring_buffer_producer(void) ...@@ -263,6 +288,27 @@ static void ring_buffer_producer(void)
if (kill_test) if (kill_test)
trace_printk("ERROR!\n"); trace_printk("ERROR!\n");
if (!disable_reader) {
if (consumer_fifo < 0)
trace_printk("Running Consumer at nice: %d\n",
consumer_nice);
else
trace_printk("Running Consumer at SCHED_FIFO %d\n",
consumer_fifo);
}
if (producer_fifo < 0)
trace_printk("Running Producer at nice: %d\n",
producer_nice);
else
trace_printk("Running Producer at SCHED_FIFO %d\n",
producer_fifo);
/* Let the user know that the test is running at low priority */
if (producer_fifo < 0 && consumer_fifo < 0 &&
producer_nice == 19 && consumer_nice == 19)
trace_printk("WARNING!!! This test is running at lowest priority.\n");
trace_printk("Time: %lld (usecs)\n", time); trace_printk("Time: %lld (usecs)\n", time);
trace_printk("Overruns: %lld\n", overruns); trace_printk("Overruns: %lld\n", overruns);
if (disable_reader) if (disable_reader)
...@@ -392,6 +438,27 @@ static int __init ring_buffer_benchmark_init(void) ...@@ -392,6 +438,27 @@ static int __init ring_buffer_benchmark_init(void)
if (IS_ERR(producer)) if (IS_ERR(producer))
goto out_kill; goto out_kill;
/*
* Run them as low-prio background tasks by default:
*/
if (!disable_reader) {
if (consumer_fifo >= 0) {
struct sched_param param = {
.sched_priority = consumer_fifo
};
sched_setscheduler(consumer, SCHED_FIFO, &param);
} else
set_user_nice(consumer, consumer_nice);
}
if (producer_fifo >= 0) {
struct sched_param param = {
.sched_priority = producer_fifo
};
sched_setscheduler(producer, SCHED_FIFO, &param);
} else
set_user_nice(producer, producer_nice);
return 0; return 0;
out_kill: out_kill:
......
...@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf); ...@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf);
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer; static char *default_bootup_tracer;
static int __init set_ftrace(char *str) static int __init set_cmdline_ftrace(char *str)
{ {
strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf; default_bootup_tracer = bootup_tracer_buf;
...@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str) ...@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str)
ring_buffer_expanded = 1; ring_buffer_expanded = 1;
return 1; return 1;
} }
__setup("ftrace=", set_ftrace); __setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str) static int __init set_ftrace_dump_on_oops(char *str)
{ {
......
...@@ -483,10 +483,6 @@ static inline int ftrace_graph_addr(unsigned long addr) ...@@ -483,10 +483,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
return 0; return 0;
} }
#else #else
static inline int ftrace_trace_addr(unsigned long addr)
{
return 1;
}
static inline int ftrace_graph_addr(unsigned long addr) static inline int ftrace_graph_addr(unsigned long addr)
{ {
return 1; return 1;
...@@ -500,12 +496,12 @@ print_graph_function(struct trace_iterator *iter) ...@@ -500,12 +496,12 @@ print_graph_function(struct trace_iterator *iter)
} }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
extern struct pid *ftrace_pid_trace; extern struct list_head ftrace_pids;
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task) static inline int ftrace_trace_task(struct task_struct *task)
{ {
if (!ftrace_pid_trace) if (list_empty(&ftrace_pids))
return 1; return 1;
return test_tsk_trace_trace(task); return test_tsk_trace_trace(task);
...@@ -699,15 +695,31 @@ struct event_subsystem { ...@@ -699,15 +695,31 @@ struct event_subsystem {
}; };
struct filter_pred; struct filter_pred;
struct regex;
typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
int val1, int val2); int val1, int val2);
typedef int (*regex_match_func)(char *str, struct regex *r, int len);
enum regex_type {
MATCH_FULL,
MATCH_FRONT_ONLY,
MATCH_MIDDLE_ONLY,
MATCH_END_ONLY,
};
struct regex {
char pattern[MAX_FILTER_STR_VAL];
int len;
int field_len;
regex_match_func match;
};
struct filter_pred { struct filter_pred {
filter_pred_fn_t fn; filter_pred_fn_t fn;
u64 val; u64 val;
char str_val[MAX_FILTER_STR_VAL]; struct regex regex;
int str_len;
char *field_name; char *field_name;
int offset; int offset;
int not; int not;
...@@ -715,6 +727,8 @@ struct filter_pred { ...@@ -715,6 +727,8 @@ struct filter_pred {
int pop_n; int pop_n;
}; };
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call, extern void print_event_filter(struct ftrace_event_call *call,
struct trace_seq *s); struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call, extern int apply_event_filter(struct ftrace_event_call *call,
......
...@@ -20,6 +20,8 @@ ...@@ -20,6 +20,8 @@
#include <linux/ktime.h> #include <linux/ktime.h>
#include <linux/trace_clock.h> #include <linux/trace_clock.h>
#include "trace.h"
/* /*
* trace_clock_local(): the simplest and least coherent tracing clock. * trace_clock_local(): the simplest and least coherent tracing clock.
* *
...@@ -28,17 +30,17 @@ ...@@ -28,17 +30,17 @@
*/ */
u64 notrace trace_clock_local(void) u64 notrace trace_clock_local(void)
{ {
unsigned long flags;
u64 clock; u64 clock;
int resched;
/* /*
* sched_clock() is an architecture implemented, fast, scalable, * sched_clock() is an architecture implemented, fast, scalable,
* lockless clock. It is not guaranteed to be coherent across * lockless clock. It is not guaranteed to be coherent across
* CPUs, nor across CPU idle events. * CPUs, nor across CPU idle events.
*/ */
raw_local_irq_save(flags); resched = ftrace_preempt_disable();
clock = sched_clock(); clock = sched_clock();
raw_local_irq_restore(flags); ftrace_preempt_enable(resched);
return clock; return clock;
} }
......
...@@ -878,7 +878,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events) ...@@ -878,7 +878,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
"'%s/filter' entry\n", name); "'%s/filter' entry\n", name);
} }
entry = trace_create_file("enable", 0644, system->entry, trace_create_file("enable", 0644, system->entry,
(void *)system->name, (void *)system->name,
&ftrace_system_enable_fops); &ftrace_system_enable_fops);
...@@ -892,7 +892,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, ...@@ -892,7 +892,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
const struct file_operations *filter, const struct file_operations *filter,
const struct file_operations *format) const struct file_operations *format)
{ {
struct dentry *entry;
int ret; int ret;
/* /*
...@@ -910,11 +909,11 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, ...@@ -910,11 +909,11 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
} }
if (call->regfunc) if (call->regfunc)
entry = trace_create_file("enable", 0644, call->dir, call, trace_create_file("enable", 0644, call->dir, call,
enable); enable);
if (call->id && call->profile_enable) if (call->id && call->profile_enable)
entry = trace_create_file("id", 0444, call->dir, call, trace_create_file("id", 0444, call->dir, call,
id); id);
if (call->define_fields) { if (call->define_fields) {
...@@ -924,7 +923,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, ...@@ -924,7 +923,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
" events/%s\n", call->name); " events/%s\n", call->name);
return ret; return ret;
} }
entry = trace_create_file("filter", 0644, call->dir, call, trace_create_file("filter", 0644, call->dir, call,
filter); filter);
} }
...@@ -932,7 +931,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, ...@@ -932,7 +931,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
if (!call->show_format) if (!call->show_format)
return 0; return 0;
entry = trace_create_file("format", 0444, call->dir, call, trace_create_file("format", 0444, call->dir, call,
format); format);
return 0; return 0;
......
...@@ -18,8 +18,6 @@ ...@@ -18,8 +18,6 @@
* Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
*/ */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/mutex.h> #include <linux/mutex.h>
...@@ -197,9 +195,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event, ...@@ -197,9 +195,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
char *addr = (char *)(event + pred->offset); char *addr = (char *)(event + pred->offset);
int cmp, match; int cmp, match;
cmp = strncmp(addr, pred->str_val, pred->str_len); cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
match = (!cmp) ^ pred->not; match = cmp ^ pred->not;
return match; return match;
} }
...@@ -211,9 +209,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event, ...@@ -211,9 +209,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event,
char **addr = (char **)(event + pred->offset); char **addr = (char **)(event + pred->offset);
int cmp, match; int cmp, match;
cmp = strncmp(*addr, pred->str_val, pred->str_len); cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len);
match = (!cmp) ^ pred->not; match = cmp ^ pred->not;
return match; return match;
} }
...@@ -237,9 +235,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event, ...@@ -237,9 +235,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event,
char *addr = (char *)(event + str_loc); char *addr = (char *)(event + str_loc);
int cmp, match; int cmp, match;
cmp = strncmp(addr, pred->str_val, str_len); cmp = pred->regex.match(addr, &pred->regex, str_len);
match = (!cmp) ^ pred->not; match = cmp ^ pred->not;
return match; return match;
} }
...@@ -250,6 +248,124 @@ static int filter_pred_none(struct filter_pred *pred, void *event, ...@@ -250,6 +248,124 @@ static int filter_pred_none(struct filter_pred *pred, void *event,
return 0; return 0;
} }
/* Basic regex callbacks */
static int regex_match_full(char *str, struct regex *r, int len)
{
if (strncmp(str, r->pattern, len) == 0)
return 1;
return 0;
}
static int regex_match_front(char *str, struct regex *r, int len)
{
if (strncmp(str, r->pattern, len) == 0)
return 1;
return 0;
}
static int regex_match_middle(char *str, struct regex *r, int len)
{
if (strstr(str, r->pattern))
return 1;
return 0;
}
static int regex_match_end(char *str, struct regex *r, int len)
{
char *ptr = strstr(str, r->pattern);
if (ptr && (ptr[r->len] == 0))
return 1;
return 0;
}
/**
* filter_parse_regex - parse a basic regex
* @buff: the raw regex
* @len: length of the regex
* @search: will point to the beginning of the string to compare
* @not: tell whether the match will have to be inverted
*
* The caller passes in a buffer containing a regex; this function sets
* *search to point to the part of the buffer to compare against and
* returns the type of match (see the enum above).
* Note that buff is modified in place.
*
* Returns enum type.
* search returns the pointer to use for comparison.
* not returns 1 if buff started with a '!'
* 0 otherwise.
*/
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
int type = MATCH_FULL;
int i;
if (buff[0] == '!') {
*not = 1;
buff++;
len--;
} else
*not = 0;
*search = buff;
for (i = 0; i < len; i++) {
if (buff[i] == '*') {
if (!i) {
*search = buff + 1;
type = MATCH_END_ONLY;
} else {
if (type == MATCH_END_ONLY)
type = MATCH_MIDDLE_ONLY;
else
type = MATCH_FRONT_ONLY;
buff[i] = 0;
break;
}
}
}
return type;
}
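/*
 * Illustration, not part of this patch: how filter_parse_regex()
 * classifies a few patterns (quotes not included in the buffer):
 *
 *   "foo"    -> MATCH_FULL,         *search = "foo", *not = 0
 *   "foo*"   -> MATCH_FRONT_ONLY,   *search = "foo"
 *   "*foo"   -> MATCH_END_ONLY,     *search = "foo"
 *   "*foo*"  -> MATCH_MIDDLE_ONLY,  *search = "foo"
 *   "!foo*"  -> MATCH_FRONT_ONLY,   *search = "foo", *not = 1
 */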
static int filter_build_regex(struct filter_pred *pred)
{
struct regex *r = &pred->regex;
char *search, *dup;
enum regex_type type;
int not;
type = filter_parse_regex(r->pattern, r->len, &search, &not);
dup = kstrdup(search, GFP_KERNEL);
if (!dup)
return -ENOMEM;
strcpy(r->pattern, dup);
kfree(dup);
r->len = strlen(r->pattern);
switch (type) {
case MATCH_FULL:
r->match = regex_match_full;
break;
case MATCH_FRONT_ONLY:
r->match = regex_match_front;
break;
case MATCH_MIDDLE_ONLY:
r->match = regex_match_middle;
break;
case MATCH_END_ONLY:
r->match = regex_match_end;
break;
}
pred->not ^= not;
return 0;
}
/* return 1 if event matches, 0 otherwise (discard) */ /* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct ftrace_event_call *call, void *rec) int filter_match_preds(struct ftrace_event_call *call, void *rec)
{ {
...@@ -396,7 +512,7 @@ static void filter_clear_pred(struct filter_pred *pred) ...@@ -396,7 +512,7 @@ static void filter_clear_pred(struct filter_pred *pred)
{ {
kfree(pred->field_name); kfree(pred->field_name);
pred->field_name = NULL; pred->field_name = NULL;
pred->str_len = 0; pred->regex.len = 0;
} }
static int filter_set_pred(struct filter_pred *dest, static int filter_set_pred(struct filter_pred *dest,
...@@ -660,21 +776,24 @@ static int filter_add_pred(struct filter_parse_state *ps, ...@@ -660,21 +776,24 @@ static int filter_add_pred(struct filter_parse_state *ps,
} }
if (is_string_field(field)) { if (is_string_field(field)) {
pred->str_len = field->size; ret = filter_build_regex(pred);
if (ret)
return ret;
if (field->filter_type == FILTER_STATIC_STRING) if (field->filter_type == FILTER_STATIC_STRING) {
fn = filter_pred_string; fn = filter_pred_string;
else if (field->filter_type == FILTER_DYN_STRING) pred->regex.field_len = field->size;
} else if (field->filter_type == FILTER_DYN_STRING)
fn = filter_pred_strloc; fn = filter_pred_strloc;
else { else {
fn = filter_pred_pchar; fn = filter_pred_pchar;
pred->str_len = strlen(pred->str_val); pred->regex.field_len = strlen(pred->regex.pattern);
} }
} else { } else {
if (field->is_signed) if (field->is_signed)
ret = strict_strtoll(pred->str_val, 0, &val); ret = strict_strtoll(pred->regex.pattern, 0, &val);
else else
ret = strict_strtoull(pred->str_val, 0, &val); ret = strict_strtoull(pred->regex.pattern, 0, &val);
if (ret) { if (ret) {
parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
return -EINVAL; return -EINVAL;
...@@ -1045,8 +1164,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2) ...@@ -1045,8 +1164,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2)
return NULL; return NULL;
} }
strcpy(pred->str_val, operand2); strcpy(pred->regex.pattern, operand2);
pred->str_len = strlen(operand2); pred->regex.len = strlen(pred->regex.pattern);
pred->op = op; pred->op = op;
......
...@@ -48,11 +48,11 @@ ...@@ -48,11 +48,11 @@
struct ____ftrace_##name { \ struct ____ftrace_##name { \
tstruct \ tstruct \
}; \ }; \
static void __used ____ftrace_check_##name(void) \ static void __always_unused ____ftrace_check_##name(void) \
{ \ { \
struct ____ftrace_##name *__entry = NULL; \ struct ____ftrace_##name *__entry = NULL; \
\ \
/* force cmpile-time check on F_printk() */ \ /* force compile-time check on F_printk() */ \
printk(print); \ printk(print); \
} }
......
...@@ -14,6 +14,69 @@ static int sys_refcount_exit; ...@@ -14,6 +14,69 @@ static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
static struct syscall_metadata **syscalls_metadata;
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
struct syscall_metadata *start;
struct syscall_metadata *stop;
char str[KSYM_SYMBOL_LEN];
start = (struct syscall_metadata *)__start_syscalls_metadata;
stop = (struct syscall_metadata *)__stop_syscalls_metadata;
kallsyms_lookup(syscall, NULL, NULL, NULL, str);
for ( ; start < stop; start++) {
/*
* Only compare after the "sys" prefix. Archs that use
* syscall wrappers may have syscall symbol aliases prefixed
* with "SyS" instead of "sys", leading to an unwanted
* mismatch.
*/
if (start->name && !strcmp(start->name + 3, str + 3))
return start;
}
return NULL;
}
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
return NULL;
return syscalls_metadata[nr];
}
int syscall_name_to_nr(char *name)
{
int i;
if (!syscalls_metadata)
return -1;
for (i = 0; i < NR_syscalls; i++) {
if (syscalls_metadata[i]) {
if (!strcmp(syscalls_metadata[i]->name, name))
return i;
}
}
return -1;
}
void set_syscall_enter_id(int num, int id)
{
syscalls_metadata[num]->enter_id = id;
}
void set_syscall_exit_id(int num, int id)
{
syscalls_metadata[num]->exit_id = id;
}
enum print_line_t enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags) print_syscall_enter(struct trace_iterator *iter, int flags)
{ {
...@@ -375,6 +438,29 @@ struct trace_event event_syscall_exit = { ...@@ -375,6 +438,29 @@ struct trace_event event_syscall_exit = {
.trace = print_syscall_exit, .trace = print_syscall_exit,
}; };
int __init init_ftrace_syscalls(void)
{
struct syscall_metadata *meta;
unsigned long addr;
int i;
syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
NR_syscalls, GFP_KERNEL);
if (!syscalls_metadata) {
WARN_ON(1);
return -ENOMEM;
}
for (i = 0; i < NR_syscalls; i++) {
addr = arch_syscall_addr(i);
meta = find_syscall_meta(addr);
syscalls_metadata[i] = meta;
}
return 0;
}
core_initcall(init_ftrace_syscalls);
#ifdef CONFIG_EVENT_PROFILE #ifdef CONFIG_EVENT_PROFILE
static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
......
...@@ -5,10 +5,13 @@ ...@@ -5,10 +5,13 @@
* relegated to obsolescence, but used by various less * relegated to obsolescence, but used by various less
* important (or lazy) subsystems. * important (or lazy) subsystems.
*/ */
#include <linux/smp_lock.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
#include <linux/semaphore.h> #include <linux/semaphore.h>
#include <linux/smp_lock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/bkl.h>
/* /*
* The 'big kernel lock' * The 'big kernel lock'
...@@ -113,21 +116,26 @@ static inline void __unlock_kernel(void) ...@@ -113,21 +116,26 @@ static inline void __unlock_kernel(void)
* This cannot happen asynchronously, so we only need to * This cannot happen asynchronously, so we only need to
* worry about other CPU's. * worry about other CPU's.
*/ */
void __lockfunc lock_kernel(void) void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{ {
int depth = current->lock_depth+1; int depth = current->lock_depth + 1;
trace_lock_kernel(func, file, line);
if (likely(!depth)) if (likely(!depth))
__lock_kernel(); __lock_kernel();
current->lock_depth = depth; current->lock_depth = depth;
} }
void __lockfunc unlock_kernel(void) void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{ {
BUG_ON(current->lock_depth < 0); BUG_ON(current->lock_depth < 0);
if (likely(--current->lock_depth < 0)) if (likely(--current->lock_depth < 0))
__unlock_kernel(); __unlock_kernel();
trace_unlock_kernel(func, file, line);
} }
EXPORT_SYMBOL(lock_kernel); EXPORT_SYMBOL(_lock_kernel);
EXPORT_SYMBOL(unlock_kernel); EXPORT_SYMBOL(_unlock_kernel);