Commit dce0e7d5 authored by Petri Gynther, committed by Ralf Baechle

MIPS: ftrace: Fix dynamic tracing of kernel modules

Dynamic tracing of kernel modules is broken on 32-bit MIPS. With modules
loaded, the kernel crashes when dynamic tracing is enabled with:
 cd /sys/kernel/debug/tracing
 echo > set_ftrace_filter
 echo function > current_tracer

1) arch/mips/kernel/ftrace.c
When the kernel boots, or when a module is initialized, ftrace_make_nop()
modifies every _mcount call site to eliminate the ftrace overhead.
However, when ftrace is later enabled for a call site, ftrace_make_call()
does not currently restore the _mcount call correctly for module call sites.
Added ftrace_modify_code_2r() and modified ftrace_make_call() to fix this.
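
The new ftrace_modify_code_2r() writes the second instruction word of the
call site before the first, presumably so the site stays safe while it is
being patched: the old first word is still the "b 1f" branch that skips
over the not-yet-written second word. A minimal sketch of that ordering
(hypothetical helper name, plain stores in place of the kernel's
safe_store_code() and flush_icache_range() handling):

 /*
  * Illustrative sketch only -- not the kernel implementation.  Restore a
  * two-word "la v1, _mcount" call site, writing the second word first so
  * the still-present "b 1f" in the first word keeps skipping the sequence
  * until both words are in place.
  */
 static int patch_module_call_site(unsigned int *site,
				   unsigned int la_hi,  /* lui   v1, hi16(_mcount)     */
				   unsigned int la_lo)  /* addiu v1, v1, lo16(_mcount) */
 {
	site[1] = la_lo;	/* second instruction word first         */
	site[0] = la_hi;	/* then the first word; site is now live */
	/* a real implementation must also flush the icache for both words */
	return 0;
 }

This is consistent with the existing ftrace_modify_code_2(), used when
NOPping a site, which writes the first word (the branch) first so the
site is disabled before the second word is touched.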

2) arch/mips/kernel/mcount.S
The _mcount assembly routine is supposed to receive the caller's _mcount
call site address in register a0. However, a0 is currently not calculated
correctly for module call sites. a0 should be (ra - 24) or (ra - 20),
depending on whether the kernel was built with KBUILD_MCOUNT_RA_ADDRESS
or not.
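
To make the (ra - 24) / (ra - 20) figures concrete, here is the module
call-site layout from the comment block in arch/mips/kernel/ftrace.c (see
the diff below), annotated with byte offsets added here for illustration
only:

 /*
  * Module call site built with KBUILD_MCOUNT_RA_ADDRESS && CONFIG_32BIT,
  * offsets relative to the recorded call-site address:
  *
  *   +0   lui   v1, hi_16bit_of_mcount        <-- call site, wanted in a0
  *   +4   addiu v1, v1, low_16bit_of_mcount
  *   +8   move  at, ra
  *   +12  move  $12, ra_address
  *   +16  jalr  v1
  *   +20   sub  sp, sp, 8                     (branch delay slot)
  *   +24                                      <-- ra points here in _mcount
  *
  * Hence a0 = ra - 24.  Without KBUILD_MCOUNT_RA_ADDRESS the sequence has
  * no separate "move $12, ra_address", is one word shorter, and
  * a0 = ra - 20.  This matches mcount.S subtracting a further 16 or 12
  * bytes after the initial a0 = ra - 8.
  */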

This fix has been tested on a Broadcom BMIPS5000 processor. Dynamic tracing
now works for both built-in functions and module functions.
Signed-off-by: Petri Gynther <pgynther@google.com>
Cc: linux-mips@linux-mips.org
Cc: rostedt@goodmis.org
Cc: alcooperx@gmail.com
Cc: cminyard@mvista.com
Patchwork: https://patchwork.linux-mips.org/patch/7476/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent b1442d39
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -63,7 +63,7 @@ static inline int in_kernel_space(unsigned long ip)
 	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
 
 static unsigned int insn_jal_ftrace_caller __read_mostly;
-static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
+static unsigned int insn_la_mcount[2] __read_mostly;
 static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
 
 static inline void ftrace_dyn_arch_init_insns(void)
@@ -71,10 +71,10 @@ static inline void ftrace_dyn_arch_init_insns(void)
 	u32 *buf;
 	unsigned int v1;
 
-	/* lui v1, hi16_mcount */
+	/* la v1, _mcount */
 	v1 = 3;
-	buf = (u32 *)&insn_lui_v1_hi16_mcount;
-	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);
+	buf = (u32 *)&insn_la_mcount[0];
+	UASM_i_LA(&buf, v1, MCOUNT_ADDR);
 
 	/* jal (ftrace_caller + 8), jump over the first two instruction */
 	buf = (u32 *)&insn_jal_ftrace_caller;
@@ -111,14 +111,47 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
 				unsigned int new_code2)
 {
 	int faulted;
+	mm_segment_t old_fs;
 
 	safe_store_code(new_code1, ip, faulted);
 	if (unlikely(faulted))
 		return -EFAULT;
-	safe_store_code(new_code2, ip + 4, faulted);
+
+	ip += 4;
+	safe_store_code(new_code2, ip, faulted);
 	if (unlikely(faulted))
 		return -EFAULT;
+
+	ip -= 4;
+	old_fs = get_fs();
+	set_fs(get_ds());
 	flush_icache_range(ip, ip + 8);
+	set_fs(old_fs);
+
+	return 0;
+}
+
+static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
+				 unsigned int new_code2)
+{
+	int faulted;
+	mm_segment_t old_fs;
+
+	ip += 4;
+	safe_store_code(new_code2, ip, faulted);
+	if (unlikely(faulted))
+		return -EFAULT;
+
+	ip -= 4;
+	safe_store_code(new_code1, ip, faulted);
+	if (unlikely(faulted))
+		return -EFAULT;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	flush_icache_range(ip, ip + 8);
+	set_fs(old_fs);
+
 	return 0;
 }
 #endif
@@ -130,13 +163,14 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
  *
  * move at, ra
  * jal _mcount			--> nop
+ *  sub sp, sp, 8		--> nop  (CONFIG_32BIT)
  *
  * 2. For modules:
  *
  * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
  *
  * lui v1, hi_16bit_of_mcount		--> b 1f (0x10000005)
- * addiu v1, v1, low_16bit_of_mcount
+ * addiu v1, v1, low_16bit_of_mcount	--> nop  (CONFIG_32BIT)
  * move at, ra
  * move $12, ra_address
  * jalr v1
@@ -145,7 +179,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
  * 2.2 For the Other situations
  *
  * lui v1, hi_16bit_of_mcount		--> b 1f (0x10000004)
- * addiu v1, v1, low_16bit_of_mcount
+ * addiu v1, v1, low_16bit_of_mcount	--> nop  (CONFIG_32BIT)
  * move at, ra
  * jalr v1
  *  nop | move $12, ra_address | sub sp, sp, 8
@@ -184,10 +218,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
-		insn_lui_v1_hi16_mcount;
+	new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
 
+#ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
+#else
+	return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+						INSN_NOP : insn_la_mcount[1]);
+#endif
 }
 
 #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -84,6 +84,19 @@ _mcount:
 #endif
 	PTR_SUBU a0, ra, 8	/* arg1: self address */
+	PTR_LA	t1, _stext
+	sltu	t2, a0, t1	/* t2 = (a0 < _stext) */
+	PTR_LA	t1, _etext
+	sltu	t3, t1, a0	/* t3 = (a0 > _etext) */
+	or	t1, t2, t3
+	beqz	t1, ftrace_call
+	 nop
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+	PTR_SUBU a0, a0, 16	/* arg1: adjust to module's recorded callsite */
+#else
+	PTR_SUBU a0, a0, 12
+#endif
+
 	.globl ftrace_call
 ftrace_call:
 	nop	/* a placeholder for the call to a real tracing function */