Commit e4b053d9 authored by Paul Mundt

sh: ftrace: Make code modification NMI safe.

This cribs the x86 implementation of ftrace_nmi_enter() and friends to
make ftrace_modify_code() NMI safe, particularly on SMP configurations.

For additional notes on the problems involved, see the comment below
ftrace_call_replace().
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent c8afde7f
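
Before the diff itself, a standalone user-space sketch of the scheme the patch borrows from x86 (this is not part of the commit; the names mirror the patch, and C11 atomics stand in for the kernel's atomic_t API). One atomic word is simultaneously a count of NMIs in flight and, in its top bit, a "code is being modified" flag:

#include <stdatomic.h>
#include <stdio.h>

#define MOD_CODE_WRITE_FLAG (1u << 31)  /* top bit: a writer is active */

static atomic_uint nmi_running;         /* NMI count plus flag bit */

static void nmi_enter(void)             /* ftrace_nmi_enter() analogue */
{
        /* Bump the in-NMI count; the flag bit rides along unchanged. */
        if ((atomic_fetch_add(&nmi_running, 1) + 1) & MOD_CODE_WRITE_FLAG)
                puts("NMI performs the pending code write itself");
}

static void nmi_exit(void)              /* ftrace_nmi_exit() analogue */
{
        atomic_fetch_sub(&nmi_running, 1);
}

int main(void)
{
        /* Writer: raise the flag only while the NMI count reads zero. */
        unsigned expected = 0;
        while (!atomic_compare_exchange_weak(&nmi_running, &expected,
                                             MOD_CODE_WRITE_FLAG))
                expected = 0;

        nmi_enter();    /* an NMI arriving now sees the flag set */
        nmi_exit();

        /* Clear the flag without disturbing the count. */
        atomic_fetch_and(&nmi_running, ~MOD_CODE_WRITE_FLAG);
        return 0;
}

Compile with -std=c11 -pthread. In the patch the same roles are played by nmi_running, MOD_CODE_WRITE_FLAG, wait_for_nmi_and_set_mod_flag() and clear_mod_flag().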
arch/sh/Kconfig
@@ -38,6 +38,7 @@ config SUPERH32
         select HAVE_DYNAMIC_FTRACE
         select HAVE_FUNCTION_TRACE_MCOUNT_TEST
         select HAVE_FTRACE_SYSCALLS
+        select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
         select HAVE_FUNCTION_GRAPH_TRACER
         select HAVE_ARCH_KGDB
         select ARCH_HIBERNATION_POSSIBLE if MMU
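
The new select line only matters because generic code keys off it. Roughly — simplified from include/linux/ftrace_irq.h of this kernel generation, so read it as a sketch rather than a quotation — enabling FTRACE_NMI_ENTER swaps empty stubs for the arch-provided hooks that the generic NMI entry/exit path invokes:

#ifdef CONFIG_FTRACE_NMI_ENTER
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
#else
static inline void ftrace_nmi_enter(void) { }
static inline void ftrace_nmi_exit(void) { }
#endif

The "if DYNAMIC_FTRACE" guard reflects that the hooks are only needed when mcount call sites are actually patched at runtime.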
arch/sh/kernel/ftrace.c
@@ -62,6 +62,150 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
         return ftrace_replaced_code;
 }
 
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ *    we are modifying code; this is done in one atomic operation.
+ * 3) Write the code.
+ * 4) Clear the flag.
+ * 5) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+#define MOD_CODE_WRITE_FLAG (1 << 31)  /* set when NMI should do the write */
+static atomic_t nmi_running = ATOMIC_INIT(0);
+static int mod_code_status;            /* holds return value of text write */
+static void *mod_code_ip;              /* holds the IP to write to */
+static void *mod_code_newcode;         /* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+        int r;
+
+        r = snprintf(buf, size, "%u %u",
+                     nmi_wait_count,
+                     atomic_read(&nmi_update_count));
+        return r;
+}
+
+static void clear_mod_flag(void)
+{
+        int old = atomic_read(&nmi_running);
+
+        for (;;) {
+                int new = old & ~MOD_CODE_WRITE_FLAG;
+
+                if (old == new)
+                        break;
+
+                old = atomic_cmpxchg(&nmi_running, old, new);
+        }
+}
+
+static void ftrace_mod_code(void)
+{
+        /*
+         * Yes, more than one CPU can be writing to mod_code_status
+         * (and to the code itself).
+         * But if one were to fail, then they all should, and if one were
+         * to succeed, then they all should.
+         */
+        mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+                                             MCOUNT_INSN_SIZE);
+
+        /* if we fail, then kill any new writers */
+        if (mod_code_status)
+                clear_mod_flag();
+}
+
+void ftrace_nmi_enter(void)
+{
+        if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+                smp_rmb();
+                ftrace_mod_code();
+                atomic_inc(&nmi_update_count);
+        }
+        /* Must have previous changes seen before executions */
+        smp_mb();
+}
+
+void ftrace_nmi_exit(void)
+{
+        /* Finish all executions before clearing nmi_running */
+        smp_mb();
+        atomic_dec(&nmi_running);
+}
+
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+        if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+                return;
+
+        do {
+                cpu_relax();
+        } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+        nmi_wait_count++;
+}
+
+static void wait_for_nmi(void)
+{
+        if (!atomic_read(&nmi_running))
+                return;
+
+        do {
+                cpu_relax();
+        } while (atomic_read(&nmi_running));
+
+        nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+        mod_code_ip = (void *)ip;
+        mod_code_newcode = new_code;
+
+        /* The buffers need to be visible before we let NMIs write them */
+        smp_mb();
+
+        wait_for_nmi_and_set_mod_flag();
+
+        /* Make sure all running NMIs have finished before we write the code */
+        smp_mb();
+
+        ftrace_mod_code();
+
+        /* Make sure the write happens before clearing the bit */
+        smp_mb();
+
+        clear_mod_flag();
+        wait_for_nmi();
+
+        return mod_code_status;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                        unsigned char *new_code)
 {
@@ -86,7 +230,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                 return -EINVAL;
 
         /* replace the text with the new text */
-        if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+        if (do_ftrace_mod_code(ip, new_code))
                 return -EPERM;
 
         flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
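
To see why the barrier placement in do_ftrace_mod_code() and ftrace_nmi_enter() matters, here is a hedged user-space model of the publish/consume pairing (C11 fences stand in for smp_mb()/smp_rmb(); the buffer and flag names are illustrative, not the kernel's). The writer's barrier before raising the flag pairs with the NMI side's read barrier after observing it, so the IP and code buffers are guaranteed to be populated before any NMI acts on them:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static int buffer;              /* stands in for mod_code_ip/mod_code_newcode */
static atomic_int flag;         /* stands in for MOD_CODE_WRITE_FLAG */

static void *nmi_side(void *arg)
{
        (void)arg;
        /* Spin until the flag is observed (ftrace_nmi_enter's check). */
        while (!atomic_load_explicit(&flag, memory_order_relaxed))
                ;
        atomic_thread_fence(memory_order_acquire);  /* smp_rmb() analogue */
        assert(buffer == 42);   /* the published buffers must be visible */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, nmi_side, NULL);

        buffer = 42;                                /* fill the buffers */
        atomic_thread_fence(memory_order_release);  /* smp_mb() analogue */
        atomic_store_explicit(&flag, 1, memory_order_relaxed);

        pthread_join(t, NULL);
        return 0;
}

Without the release fence, the flag store could become visible before the buffer write, and the NMI side would patch from stale buffers; this is exactly the ordering the first smp_mb() in do_ftrace_mod_code() exists to rule out.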