Commit 5bf9cbef authored by Yi Li, committed by Mike Frysinger

Blackfin: update ftrace for latest toolchain

The mcount support that was finally added to the Blackfin gcc port isn't
exactly the same as what ftrace was developed against.  Now that the final
gcc version is in place, update the ftrace code to match.

While updating this, fix the swapped arguments to the tracer (signature is
(ip, parent_ip) while we were passing (parent_ip, ip)).
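For reference, the callback __mcount hands control to uses exactly that order; a minimal C sketch of the prototype, modelled on the function_trace_call() name used in the updated comment below (the Blackfin ABI passes the first two arguments in R0 and R1, so the stub must load ip into r0 and parent_ip into r1):

/* Sketch only: prototype as named in the comment this commit adds; the
 * empty body is illustrative, not the kernel's implementation. */
static void function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	/* ip:        address inside the function that called mcount (arrives in R0) */
	/* parent_ip: that function's caller, i.e. the saved return address (R1)     */
	(void)ip;
	(void)parent_ip;
}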
Signed-off-by: Yi Li <yi.li@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
parent 3b67d91b
@@ -8,6 +8,6 @@
 #ifndef __ASM_BFIN_FTRACE_H__
 #define __ASM_BFIN_FTRACE_H__
 
-#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call: LINK + CALL */
+#define MCOUNT_INSN_SIZE 6 /* sizeof "[++sp] = rets; call __mcount;" */
 
 #endif
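The new value falls out of the two-instruction call sequence quoted in the comment; a rough breakdown, assuming the usual Blackfin encodings (a 16-bit push of RETS and a 32-bit pcrel call), since the commit itself does not spell the sizes out. The old value of 8 likewise matched the former LINK + CALL pair at 4 bytes each. The helper names below are hypothetical:

/* Hypothetical breakdown; instruction sizes assumed from the Blackfin ISA. */
#define BFIN_PUSH_RETS_SIZE	2	/* "[++sp] = rets;" - 16-bit instruction */
#define BFIN_CALL_SIZE		4	/* "call __mcount;" - 32-bit instruction */
#define MCOUNT_INSN_SIZE	(BFIN_PUSH_RETS_SIZE + BFIN_CALL_SIZE)	/* = 6 */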
@@ -17,8 +17,8 @@
  * only one we can blow away. With pointer registers, we have P0-P2.
  *
  * Upon entry, the RETS will point to the top of the current profiled
- * function. And since GCC setup the frame for us, the previous function
- * will be waiting there. mmmm pie.
+ * function. And since GCC pushed the previous RETS for us, the previous
+ * function will be waiting there. mmmm pie.
  */
 ENTRY(__mcount)
 	/* save third function arg early so we can do testing below */
@@ -70,14 +70,14 @@ ENTRY(__mcount)
 	/* setup the tracer function */
 	p0 = r3;
 
-	/* tracer(ulong frompc, ulong selfpc):
-	 *  frompc: the pc that did the call to ...
-	 *  selfpc: ... this location
-	 * the selfpc itself will need adjusting for the mcount call
+	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
+	 *  ip: this point was called by ...
+	 *  parent_ip: ... this function
+	 * the ip itself will need adjusting for the mcount call
 	 */
-	r1 = rets;
-	r0 = [fp + 4];
-	r1 += -MCOUNT_INSN_SIZE;
+	r0 = rets;
+	r1 = [sp + 16]; /* skip the 4 local regs on stack */
+	r0 += -MCOUNT_INSN_SIZE;
 
 	/* call the tracer */
 	call (p0);
@@ -106,9 +106,10 @@ ENTRY(_ftrace_graph_caller)
 	[--sp] = r1;
 	[--sp] = rets;
 
-	r0 = fp;
+	/* prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) */
+	r0 = sp;
 	r1 = rets;
-	r0 += 4;
+	r0 += 16; /* skip the 4 local regs on stack */
 	r1 += -MCOUNT_INSN_SIZE;
 	call _prepare_ftrace_return;
 
...
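On the function-graph side, the stub now derives the parent slot from SP instead of FP: per the comments above, sp + 16 skips the four registers the stub just saved and points at the RETS value the instrumented function pushed before calling __mcount. A hedged C sketch of the hook it calls, using the prototype named in the new comment (the body is illustrative only):

/* Prototype as named in the comment this commit adds; the real function
 * lives in the Blackfin ftrace code and rewrites *parent so the traced
 * function returns into the graph tracer. */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	/* parent:    pointer to the saved RETS slot on the stack (r0 = sp + 16)      */
	/* self_addr: the instrumented function's address (rets - MCOUNT_INSN_SIZE)   */
	(void)parent;
	(void)self_addr;
}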