Commit 819e50e2 authored by AKASHI Takahiro, committed by Will Deacon

arm64: Add ftrace support

This patch implements the arm64-specific parts needed to support function
tracers, such as function (CONFIG_FUNCTION_TRACER), function_graph
(CONFIG_FUNCTION_GRAPH_TRACER) and the function profiler
(CONFIG_FUNCTION_PROFILER).

With the 'function' tracer, all kernel functions are traced with
timestamps in ${sysfs}/tracing/trace. If the function_graph tracer is
specified, a call graph is generated.

The kernel must be compiled with the -pg option so that a call to _mcount() is
inserted at the beginning of every function. _mcount() is then invoked on every
function entry as long as tracing is enabled.
In addition, the function_graph tracer needs to be able to probe a function's
exit. ftrace_graph_caller() and return_to_handler do this by faking the link
register's value to intercept the function's return path.
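
As a rough illustration (not part of the patch, and with a made-up function
name), the -pg instrumentation can be pictured in C as each traceable function
handing its own return address to _mcount() before its body runs:

	void some_traced_function(void)
	{
		/* inserted by the compiler as "mov x0, x30; bl _mcount" */
		_mcount((unsigned long)__builtin_return_address(0));

		/* ... original function body ... */
	}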

More details on the architecture-specific requirements are described in
Documentation/trace/ftrace-design.txt.
Reviewed-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent af64d2aa
@@ -38,6 +38,8 @@ config ARM64
select HAVE_DMA_CONTIGUOUS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_MEMBLOCK
/*
* arch/arm64/include/asm/ftrace.h
*
* Copyright (C) 2013 Linaro Limited
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_FTRACE_H
#define __ASM_FTRACE_H
#include <asm/insn.h>
#define MCOUNT_ADDR ((unsigned long)_mcount)
#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
#ifndef __ASSEMBLY__
extern void _mcount(unsigned long);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_FTRACE_H */
@@ -5,6 +5,9 @@
CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_insn.o = -pg
# Object file lists.
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
@@ -13,6 +16,7 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
@@ -61,3 +61,7 @@ EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(test_and_clear_bit);
EXPORT_SYMBOL(change_bit);
EXPORT_SYMBOL(test_and_change_bit);
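
/* _mcount is called from everything built with -pg, including module code */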
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
/*
* arch/arm64/kernel/entry-ftrace.S
*
* Copyright (C) 2013 Linaro Limited
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
/*
* Gcc with -pg will put the following code at the beginning of each function:
*
*	mov	x0, x30
*	bl	_mcount
*	[function's body ...]
*
* "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
* ftrace is enabled.
*
* Please note that x0 as an argument is not used here, because we can get
* the lr (x30) of the instrumented function at any time by walking up the
* call stack, as long as the kernel is compiled without -fomit-frame-pointer
* (i.e. with CONFIG_FRAME_POINTER, which is forced on arm64).
*
* stack layout after mcount_enter in _mcount():
*
* current sp/fp =>  0:+-----+
* in _mcount()        | x29 | -> instrumented function's fp
*                     +-----+
*                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
* old sp       => +16:+-----+
* when instrumented   |     |
* function calls      | ... |
* _mcount()           |     |
*                     |     |
* instrumented => +xx:+-----+
* function's fp       | x29 | -> parent's fp
*                     +-----+
*                     | x30 | -> instrumented function's lr (= parent's pc)
*                     +-----+
*                     | ... |
*/
	.macro mcount_enter
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp
	.endm

	.macro mcount_exit
	ldp	x29, x30, [sp], #16
	ret
	.endm
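
	/* convert a return address into the address of its call instruction */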
	.macro mcount_adjust_addr rd, rn
	sub	\rd, \rn, #AARCH64_INSN_SIZE
	.endm

	/* for instrumented function's parent */
	.macro mcount_get_parent_fp reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg]
	.endm

	/* for instrumented function */
	.macro mcount_get_pc0 reg
	mcount_adjust_addr	\reg, x30
	.endm

	.macro mcount_get_pc reg
	ldr	\reg, [x29, #8]
	mcount_adjust_addr	\reg, \reg
	.endm

	.macro mcount_get_lr reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg, #8]
	mcount_adjust_addr	\reg, \reg
	.endm
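
	/* for the stack slot holding the instrumented function's saved lr */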
	.macro mcount_get_lr_addr reg
	ldr	\reg, [x29]
	add	\reg, \reg, #8
	.endm
/*
* void _mcount(unsigned long return_address)
* @return_address: the instrumented function's return address
*
* This function makes calls, if enabled, to:
*	- the tracer function to probe the instrumented function's entry,
*	- ftrace_graph_caller to set up an exit hook
*/
ENTRY(_mcount)
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ldr	x0, =function_trace_stop
	ldr	w0, [x0]		// if (function_trace_stop)
	cbz	w0, 1f
	ret				//	return;
1:
#endif
	mcount_enter

	ldr	x0, =ftrace_trace_function
	ldr	x2, [x0]
	adr	x0, ftrace_stub
	cmp	x0, x2			// if (ftrace_trace_function
	b.eq	skip_ftrace_call	//     != ftrace_stub) {

	mcount_get_pc	x0		//       function's pc
	mcount_get_lr	x1		//       function's lr (= parent's pc)
	blr	x2			//   (*ftrace_trace_function)(pc, lr);

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
skip_ftrace_call:			// return;
	mcount_exit			// }
#else
	mcount_exit			// return;
					// }
skip_ftrace_call:
	ldr	x1, =ftrace_graph_return
	ldr	x2, [x1]		//     if ((ftrace_graph_return
	cmp	x0, x2			//          != ftrace_stub)
	b.ne	ftrace_graph_caller
	ldr	x1, =ftrace_graph_entry	//     || (ftrace_graph_entry
	ldr	x2, [x1]		//          != ftrace_graph_entry_stub))
	ldr	x0, =ftrace_graph_entry_stub
	cmp	x0, x2
	b.ne	ftrace_graph_caller	//       ftrace_graph_caller();

	mcount_exit
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
ENDPROC(_mcount)
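
/*
 * ftrace_stub is the default, do-nothing callback installed by the core
 * ftrace code when no tracer is registered; the comparisons in _mcount()
 * above treat it as "nothing to trace".
 */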
ENTRY(ftrace_stub)
	ret
ENDPROC(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* void ftrace_graph_caller(void)
*
* Called from _mcount() or ftrace_caller() when the function_graph tracer is
* selected.
* This function, together with prepare_ftrace_return(), fakes the link
* register's value on the call stack in order to intercept the instrumented
* function's return path and run return_to_handler() later on its exit.
*/
ENTRY(ftrace_graph_caller)
	mcount_get_lr_addr	x0	//     pointer to function's saved lr
	mcount_get_pc		x1	//     function's pc
	mcount_get_parent_fp	x2	//     parent's fp
	bl	prepare_ftrace_return	// prepare_ftrace_return(&lr, pc, fp)

	mcount_exit
ENDPROC(ftrace_graph_caller)
/*
* void return_to_handler(void)
*
* Run ftrace_return_to_handler() before going back to the parent.
* @fp is checked against the value passed by ftrace_graph_caller()
* only when CONFIG_FUNCTION_GRAPH_FP_TEST is enabled.
*/
ENTRY(return_to_handler)
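	// preserve the instrumented function's return value (x0) across the call below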
	str	x0, [sp, #-16]!
	mov	x0, x29			//     parent's fp
	bl	ftrace_return_to_handler	// addr = ftrace_return_to_handler(fp);
	mov	x30, x0			// restore the original return address
	ldr	x0, [sp], #16
	ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
* arch/arm64/kernel/ftrace.c
*
* Copyright (C) 2013 Linaro Limited
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ftrace.h>
#include <linux/swab.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* function_graph tracer expects ftrace_return_to_handler() to be called
* on the way back to the parent. For this purpose, this function is called
* from _mcount() or ftrace_caller() to replace the return address (*parent)
* on the call stack with return_to_handler.
*
* Note that @frame_pointer is used only for a sanity check later.
*/
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	struct ftrace_graph_ent trace;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;
	*parent = return_hooker;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */