Commit e4486fe3 authored by Steven Rostedt

powerpc: ftrace, use probe_kernel API to modify code

Impact: use cleaner probe_kernel API over assembly

Using the probe_kernel_read()/probe_kernel_write() interface (sketched below)
is a much cleaner approach than the current assembly version.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
parent 8fd6e5a8
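
The probe_kernel_read()/probe_kernel_write() helpers used in the new code are
declared in <linux/uaccess.h>, which is why the patch adds that include. As a
minimal reference sketch, assuming the signatures of this kernel era (not
quoted from the patch itself):

        /* Both return 0 on success and -EFAULT if the access faults;
         * the fault is caught through the kernel's exception tables,
         * replacing the open-coded .fixup/__ex_table assembly that
         * this patch removes. */
        long probe_kernel_read(void *dst, void *src, size_t size);
        long probe_kernel_write(void *dst, void *src, size_t size);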
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -9,6 +9,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
@@ -72,45 +73,33 @@ static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                    unsigned char *new_code)
 {
-        unsigned replaced;
-        unsigned old = *(unsigned *)old_code;
-        unsigned new = *(unsigned *)new_code;
-        int faulted = 0;
+        unsigned char replaced[MCOUNT_INSN_SIZE];
 
         /*
          * Note: Due to modules and __init, code can
          *  disappear and change, we need to protect against faulting
-         *  as well as code changing.
+         *  as well as code changing. We do this by using the
+         *  probe_kernel_* functions.
          *
          * No real locking needed, this code is run through
-         * kstop_machine.
+         * kstop_machine, or before SMP starts.
          */
-        asm volatile (
-                "1: lwz         %1, 0(%2)\n"
-                "   cmpw        %1, %5\n"
-                "   bne         2f\n"
-                "   stwu        %3, 0(%2)\n"
-                "2:\n"
-                ".section .fixup, \"ax\"\n"
-                "3:     li %0, 1\n"
-                "       b 2b\n"
-                ".previous\n"
-                ".section __ex_table,\"a\"\n"
-                _ASM_ALIGN "\n"
-                _ASM_PTR "1b, 3b\n"
-                ".previous"
-                : "=r"(faulted), "=r"(replaced)
-                : "r"(ip), "r"(new),
-                  "0"(faulted), "r"(old)
-                : "memory");
-
-        if (replaced != old && replaced != new)
-                faulted = 2;
-
-        if (!faulted)
-                flush_icache_range(ip, ip + 8);
-
-        return faulted;
+
+        /* read the text we want to modify */
+        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+                return -EFAULT;
+
+        /* Make sure it is what we expect it to be */
+        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+                return -EINVAL;
+
+        /* replace the text with the new text */
+        if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+                return -EPERM;
+
+        flush_icache_range(ip, ip + 8);
+
+        return 0;
 }
 
 static int
 test_24bit_addr(unsigned long ip, unsigned long addr)
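
The new flow is read, verify, write, with each failure mode mapped to a
distinct errno (-EFAULT, -EINVAL, -EPERM) so callers can tell what went
wrong. A minimal user-space sketch of that verify-then-write pattern (an
illustrative analogue only: plain memcpy() stands in for the fault-safe
probe_kernel_* calls, the instruction bytes are made up, and no icache
flush is needed in user space):

        #include <errno.h>
        #include <stdio.h>
        #include <string.h>

        #define MCOUNT_INSN_SIZE 4      /* one 32-bit powerpc instruction */

        /*
         * Patch one instruction at `ip`, but only if it still holds `old`;
         * mirrors the verify-then-write flow in ftrace_modify_code() above.
         * In the kernel, the two memcpy() calls would be probe_kernel_read()
         * and probe_kernel_write(), which also survive faulting addresses.
         */
        static int modify_code(unsigned char *ip,
                               const unsigned char *old,
                               const unsigned char *new)
        {
                unsigned char replaced[MCOUNT_INSN_SIZE];

                /* read the text we want to modify */
                memcpy(replaced, ip, MCOUNT_INSN_SIZE);

                /* make sure it is what we expect it to be */
                if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
                        return -EINVAL;

                /* replace the text with the new text */
                memcpy(ip, new, MCOUNT_INSN_SIZE);
                return 0;
        }

        int main(void)
        {
                unsigned char text[] = { 0x48, 0x00, 0x00, 0x01 }; /* bl */
                unsigned char nop[]  = { 0x60, 0x00, 0x00, 0x00 }; /* nop */
                unsigned char old[MCOUNT_INSN_SIZE];

                memcpy(old, text, MCOUNT_INSN_SIZE);
                /* first patch succeeds: text still matches old */
                printf("first patch:  %d\n", modify_code(text, old, nop));
                /* second patch fails with -EINVAL: text has changed */
                printf("second patch: %d\n", modify_code(text, old, nop));
                return 0;
        }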