Commit a4aebff7 authored by Will Deacon

Merge branch 'for-next/ftrace' into for-next/core

* for-next/ftrace:
  ftrace: arm64: remove static ftrace
  ftrace: arm64: move from REGS to ARGS
  ftrace: abstract DYNAMIC_FTRACE_WITH_ARGS accesses
  ftrace: rename ftrace_instruction_pointer_set() -> ftrace_regs_set_instruction_pointer()
  ftrace: pass fregs to arch_ftrace_set_direct_caller()
parents 1a916ed7 cfce092d
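
The branch converts arm64 from DYNAMIC_FTRACE_WITH_REGS to DYNAMIC_FTRACE_WITH_ARGS: ftrace callbacks now receive a minimal struct ftrace_regs (x0-x8, fp, lr, sp, pc) rather than a full pt_regs, and common code gains ftrace_regs_*() accessors. As a rough sketch only (not part of this series), a tracer module written against the new accessors could look like the code below; the callback, ops, and filtered symbol names are placeholders:

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

/*
 * Hypothetical callback: with DYNAMIC_FTRACE_WITH_ARGS the argument registers
 * and program counter are reachable through fregs, without requesting
 * FTRACE_OPS_FL_SAVE_REGS.
 */
static void notrace demo_callback(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op,
				  struct ftrace_regs *fregs)
{
	unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);
	unsigned long pc = ftrace_regs_get_instruction_pointer(fregs);

	trace_printk("%ps: pc=%lx arg0=%lx (called from %ps)\n",
		     (void *)ip, pc, arg0, (void *)parent_ip);
}

static struct ftrace_ops demo_ops = {
	.func = demo_callback,
};

static int __init demo_init(void)
{
	/* "kernel_clone" is only an example symbol to filter on. */
	ftrace_set_filter(&demo_ops, "kernel_clone", strlen("kernel_clone"), 0);
	return register_ftrace_function(&demo_ops);
}

static void __exit demo_exit(void)
{
	unregister_ftrace_function(&demo_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because DYNAMIC_FTRACE_WITH_ARGS makes the arguments available by default, no SAVE_REGS flag is needed here; that is exactly what the kernel/trace/Kconfig help-text change further down spells out.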
@@ -118,6 +118,7 @@ config ARM64
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select CRC32
 	select DCACHE_WORD_ACCESS
+	select DYNAMIC_FTRACE if FUNCTION_TRACER
 	select DMA_DIRECT_REMAP
 	select EDAC_SUPPORT
 	select FRAME_POINTER
@@ -182,8 +183,10 @@ config ARM64
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
+		if $(cc-option,-fpatchable-function-entry=2)
 	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
-		if DYNAMIC_FTRACE_WITH_REGS
+		if DYNAMIC_FTRACE_WITH_ARGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_FAST_GUP
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -234,16 +237,16 @@ config ARM64
 	help
 	  ARM 64-bit (AArch64) Linux support.

-config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
 	def_bool CC_IS_CLANG
 	# https://github.com/ClangBuiltLinux/linux/issues/1507
 	depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
-	select HAVE_DYNAMIC_FTRACE_WITH_REGS
+	select HAVE_DYNAMIC_FTRACE_WITH_ARGS

-config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
 	def_bool CC_IS_GCC
 	depends on $(cc-option,-fpatchable-function-entry=2)
-	select HAVE_DYNAMIC_FTRACE_WITH_REGS
+	select HAVE_DYNAMIC_FTRACE_WITH_ARGS

 config 64BIT
 	def_bool y
@@ -1836,7 +1839,7 @@ config ARM64_PTR_AUTH_KERNEL
 	# which is only understood by binutils starting with version 2.33.1.
 	depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
 	depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
-	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_ARGS)
 	help
 	  If the compiler supports the -mbranch-protection or
 	  -msign-return-address flag (e.g. GCC 7 or later), then this option
@@ -1846,7 +1849,7 @@ config ARM64_PTR_AUTH_KERNEL
 	  disabled with minimal loss of protection.

 	  This feature works with FUNCTION_GRAPH_TRACER option only if
-	  DYNAMIC_FTRACE_WITH_REGS is enabled.
+	  DYNAMIC_FTRACE_WITH_ARGS is enabled.

 config CC_HAS_BRANCH_PROT_PAC_RET
 	# GCC 9 or later, clang 8 or later
@@ -1944,7 +1947,7 @@ config ARM64_BTI_KERNEL
 	depends on !CC_IS_GCC
 	# https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
 	depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
-	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_ARGS)
 	help
 	  Build the kernel with Branch Target Identification annotations
 	  and enable enforcement of this for kernel code. When this option
...
@@ -139,7 +139,7 @@ endif

 CHECKFLAGS	+= -D__aarch64__

-ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
+ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
   KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
   CC_FLAGS_FTRACE := -fpatchable-function-entry=2
 endif
...
@@ -23,7 +23,7 @@
  */
 #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #else
 #define MCOUNT_ADDR		((unsigned long)_mcount)
@@ -33,8 +33,7 @@
 #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE

 #define FTRACE_PLT_IDX		0
-#define FTRACE_REGS_PLT_IDX	1
-#define NR_FTRACE_PLTS		2
+#define NR_FTRACE_PLTS		1

 /*
  * Currently, gcc tends to save the link register after the local variables
@@ -69,7 +68,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 	 * Adjust addr to point at the BL in the callsite.
 	 * See ftrace_init_nop() for the callsite sequence.
 	 */
-	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
 		return addr + AARCH64_INSN_SIZE;
 	/*
 	 * addr is the address of the mcount call instruction.
@@ -78,10 +77,71 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 	return addr;
 }

-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 struct dyn_ftrace;
 struct ftrace_ops;
-struct ftrace_regs;
+
+#define arch_ftrace_get_regs(regs) NULL
+
+struct ftrace_regs {
+	/* x0 - x8 */
+	unsigned long regs[9];
+	unsigned long __unused;
+
+	unsigned long fp;
+	unsigned long lr;
+
+	unsigned long sp;
+	unsigned long pc;
+};
+
+static __always_inline unsigned long
+ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
+{
+	return fregs->pc;
+}
+
+static __always_inline void
+ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
+				    unsigned long pc)
+{
+	fregs->pc = pc;
+}
+
+static __always_inline unsigned long
+ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
+{
+	return fregs->sp;
+}
+
+static __always_inline unsigned long
+ftrace_regs_get_argument(struct ftrace_regs *fregs, unsigned int n)
+{
+	if (n < 8)
+		return fregs->regs[n];
+	return 0;
+}
+
+static __always_inline unsigned long
+ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
+{
+	return fregs->regs[0];
+}
+
+static __always_inline void
+ftrace_regs_set_return_value(struct ftrace_regs *fregs,
+			     unsigned long ret)
+{
+	fregs->regs[0] = ret;
+}
+
+static __always_inline void
+ftrace_override_function_with_return(struct ftrace_regs *fregs)
+{
+	fregs->pc = fregs->lr;
+}
+
+int ftrace_regs_query_register_offset(const char *name);

 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
 #define ftrace_init_nop	ftrace_init_nop
...
@@ -82,6 +82,19 @@ int main(void)
   DEFINE(S_STACKFRAME,		offsetof(struct pt_regs, stackframe));
   DEFINE(PT_REGS_SIZE,		sizeof(struct pt_regs));
   BLANK();
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+  DEFINE(FREGS_X0,		offsetof(struct ftrace_regs, regs[0]));
+  DEFINE(FREGS_X2,		offsetof(struct ftrace_regs, regs[2]));
+  DEFINE(FREGS_X4,		offsetof(struct ftrace_regs, regs[4]));
+  DEFINE(FREGS_X6,		offsetof(struct ftrace_regs, regs[6]));
+  DEFINE(FREGS_X8,		offsetof(struct ftrace_regs, regs[8]));
+  DEFINE(FREGS_FP,		offsetof(struct ftrace_regs, fp));
+  DEFINE(FREGS_LR,		offsetof(struct ftrace_regs, lr));
+  DEFINE(FREGS_SP,		offsetof(struct ftrace_regs, sp));
+  DEFINE(FREGS_PC,		offsetof(struct ftrace_regs, pc));
+  DEFINE(FREGS_SIZE,		sizeof(struct ftrace_regs));
+  BLANK();
+#endif
 #ifdef CONFIG_COMPAT
   DEFINE(COMPAT_SIGFRAME_REGS_OFFSET,		offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
   DEFINE(COMPAT_RT_SIGFRAME_REGS_OFFSET,	offsetof(struct compat_rt_sigframe, sig.uc.uc_mcontext.arm_r0));
...
@@ -13,83 +13,58 @@
 #include <asm/ftrace.h>
 #include <asm/insn.h>

-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 /*
  * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
  * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
  * ftrace_make_call() have patched those NOPs to:
  *
  * 	MOV	X9, LR
- * 	BL	<entry>
- *
- * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
+ * 	BL	ftrace_caller
  *
  * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
  * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
  * clobber.
  *
- * We save the callsite's context into a pt_regs before invoking any ftrace
- * callbacks. So that we can get a sensible backtrace, we create a stack record
- * for the callsite and the ftrace entry assembly. This is not sufficient for
- * reliable stacktrace: until we create the callsite stack record, its caller
- * is missing from the LR and existing chain of frame records.
+ * We save the callsite's context into a struct ftrace_regs before invoking any
+ * ftrace callbacks. So that we can get a sensible backtrace, we create frame
+ * records for the callsite and the ftrace entry assembly. This is not
+ * sufficient for reliable stacktrace: until we create the callsite stack
+ * record, its caller is missing from the LR and existing chain of frame
+ * records.
  */
-	.macro	ftrace_regs_entry, allregs=0
-	/* Make room for pt_regs, plus a callee frame */
-	sub	sp, sp, #(PT_REGS_SIZE + 16)
-
-	/* Save function arguments (and x9 for simplicity) */
-	stp	x0, x1, [sp, #S_X0]
-	stp	x2, x3, [sp, #S_X2]
-	stp	x4, x5, [sp, #S_X4]
-	stp	x6, x7, [sp, #S_X6]
-	stp	x8, x9, [sp, #S_X8]
-
-	/* Optionally save the callee-saved registers, always save the FP */
-	.if \allregs == 1
-	stp	x10, x11, [sp, #S_X10]
-	stp	x12, x13, [sp, #S_X12]
-	stp	x14, x15, [sp, #S_X14]
-	stp	x16, x17, [sp, #S_X16]
-	stp	x18, x19, [sp, #S_X18]
-	stp	x20, x21, [sp, #S_X20]
-	stp	x22, x23, [sp, #S_X22]
-	stp	x24, x25, [sp, #S_X24]
-	stp	x26, x27, [sp, #S_X26]
-	stp	x28, x29, [sp, #S_X28]
-	.else
-	str	x29, [sp, #S_FP]
-	.endif
-
-	/* Save the callsite's SP and LR */
-	add	x10, sp, #(PT_REGS_SIZE + 16)
-	stp	x9, x10, [sp, #S_LR]
-
-	/* Save the PC after the ftrace callsite */
-	str	x30, [sp, #S_PC]
-
-	/* Create a frame record for the callsite above pt_regs */
-	stp	x29, x9, [sp, #PT_REGS_SIZE]
-	add	x29, sp, #PT_REGS_SIZE
-
-	/* Create our frame record within pt_regs. */
-	stp	x29, x30, [sp, #S_STACKFRAME]
-	add	x29, sp, #S_STACKFRAME
-	.endm
-
-SYM_CODE_START(ftrace_regs_caller)
-	bti	c
-	ftrace_regs_entry	1
-	b	ftrace_common
-SYM_CODE_END(ftrace_regs_caller)
-
-SYM_CODE_START(ftrace_caller)
-	bti	c
-	ftrace_regs_entry	0
-	b	ftrace_common
-SYM_CODE_END(ftrace_caller)
-
-SYM_CODE_START(ftrace_common)
+SYM_CODE_START(ftrace_caller)
+	bti	c
+
+	/* Save original SP */
+	mov	x10, sp
+
+	/* Make room for ftrace regs, plus two frame records */
+	sub	sp, sp, #(FREGS_SIZE + 32)
+
+	/* Save function arguments */
+	stp	x0, x1, [sp, #FREGS_X0]
+	stp	x2, x3, [sp, #FREGS_X2]
+	stp	x4, x5, [sp, #FREGS_X4]
+	stp	x6, x7, [sp, #FREGS_X6]
+	str	x8, [sp, #FREGS_X8]
+
+	/* Save the callsite's FP, LR, SP */
+	str	x29, [sp, #FREGS_FP]
+	str	x9, [sp, #FREGS_LR]
+	str	x10, [sp, #FREGS_SP]
+
+	/* Save the PC after the ftrace callsite */
+	str	x30, [sp, #FREGS_PC]
+
+	/* Create a frame record for the callsite above the ftrace regs */
+	stp	x29, x9, [sp, #FREGS_SIZE + 16]
+	add	x29, sp, #FREGS_SIZE + 16
+
+	/* Create our frame record above the ftrace regs */
+	stp	x29, x30, [sp, #FREGS_SIZE]
+	add	x29, sp, #FREGS_SIZE
+
 	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
 	mov	x1, x9				// parent_ip (callsite's LR)
 	ldr_l	x2, function_trace_op		// op
@@ -104,24 +79,24 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 	 * to restore x0-x8, x29, and x30.
 	 */
 	/* Restore function arguments */
-	ldp	x0, x1, [sp]
-	ldp	x2, x3, [sp, #S_X2]
-	ldp	x4, x5, [sp, #S_X4]
-	ldp	x6, x7, [sp, #S_X6]
-	ldr	x8, [sp, #S_X8]
+	ldp	x0, x1, [sp, #FREGS_X0]
+	ldp	x2, x3, [sp, #FREGS_X2]
+	ldp	x4, x5, [sp, #FREGS_X4]
+	ldp	x6, x7, [sp, #FREGS_X6]
+	ldr	x8, [sp, #FREGS_X8]

 	/* Restore the callsite's FP, LR, PC */
-	ldr	x29, [sp, #S_FP]
-	ldr	x30, [sp, #S_LR]
-	ldr	x9, [sp, #S_PC]
+	ldr	x29, [sp, #FREGS_FP]
+	ldr	x30, [sp, #FREGS_LR]
+	ldr	x9, [sp, #FREGS_PC]

 	/* Restore the callsite's SP */
-	add	sp, sp, #PT_REGS_SIZE + 16
+	add	sp, sp, #FREGS_SIZE + 32

 	ret	x9
-SYM_CODE_END(ftrace_common)
+SYM_CODE_END(ftrace_caller)

-#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

 /*
  * Gcc with -pg will put the following code in the beginning of each function:
@@ -195,44 +170,6 @@ SYM_CODE_END(ftrace_common)
 	add	\reg, \reg, #8
 	.endm

-#ifndef CONFIG_DYNAMIC_FTRACE
-/*
- * void _mcount(unsigned long return_address)
- * @return_address: return address to instrumented function
- *
- * This function makes calls, if enabled, to:
- *     - tracer function to probe instrumented function's entry,
- *     - ftrace_graph_caller to set up an exit hook
- */
-SYM_FUNC_START(_mcount)
-	mcount_enter
-
-	ldr_l	x2, ftrace_trace_function
-	adr	x0, ftrace_stub
-	cmp	x0, x2			// if (ftrace_trace_function
-	b.eq	skip_ftrace_call	//     != ftrace_stub) {
-
-	mcount_get_pc	x0		//       function's pc
-	mcount_get_lr	x1		//       function's lr (= parent's pc)
-	blr	x2			//   (*ftrace_trace_function)(pc, lr);
-
-skip_ftrace_call:			// }
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	ldr_l	x2, ftrace_graph_return
-	cmp	x0, x2			// if ((ftrace_graph_return
-	b.ne	ftrace_graph_caller	//     != ftrace_stub)
-
-	ldr_l	x2, ftrace_graph_entry	// || (ftrace_graph_entry
-	adr_l	x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
-	cmp	x0, x2
-	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-	mcount_exit
-SYM_FUNC_END(_mcount)
-EXPORT_SYMBOL(_mcount)
-NOKPROBE(_mcount)
-
-#else /* CONFIG_DYNAMIC_FTRACE */
 /*
  * _mcount() is used to build the kernel with -pg option, but all the branch
  * instructions to _mcount() are replaced to NOP initially at kernel start up,
@@ -272,7 +209,6 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
 	mcount_exit
 SYM_FUNC_END(ftrace_caller)
-#endif /* CONFIG_DYNAMIC_FTRACE */

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
@@ -293,7 +229,7 @@ SYM_FUNC_START(ftrace_graph_caller)
 	mcount_exit
 SYM_FUNC_END(ftrace_graph_caller)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

 SYM_TYPED_FUNC_START(ftrace_stub)
 	ret
...
@@ -17,7 +17,49 @@
 #include <asm/insn.h>
 #include <asm/patching.h>

-#ifdef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+struct fregs_offset {
+	const char *name;
+	int offset;
+};
+
+#define FREGS_OFFSET(n, field)				\
+{							\
+	.name = n,					\
+	.offset = offsetof(struct ftrace_regs, field),	\
+}
+
+static const struct fregs_offset fregs_offsets[] = {
+	FREGS_OFFSET("x0", regs[0]),
+	FREGS_OFFSET("x1", regs[1]),
+	FREGS_OFFSET("x2", regs[2]),
+	FREGS_OFFSET("x3", regs[3]),
+	FREGS_OFFSET("x4", regs[4]),
+	FREGS_OFFSET("x5", regs[5]),
+	FREGS_OFFSET("x6", regs[6]),
+	FREGS_OFFSET("x7", regs[7]),
+	FREGS_OFFSET("x8", regs[8]),
+
+	FREGS_OFFSET("x29", fp),
+	FREGS_OFFSET("x30", lr),
+	FREGS_OFFSET("lr", lr),
+
+	FREGS_OFFSET("sp", sp),
+	FREGS_OFFSET("pc", pc),
+};
+
+int ftrace_regs_query_register_offset(const char *name)
+{
+	for (int i = 0; i < ARRAY_SIZE(fregs_offsets); i++) {
+		const struct fregs_offset *roff = &fregs_offsets[i];
+
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+	}
+
+	return -EINVAL;
+}
+#endif
+
 /*
  * Replace a single instruction, which may be a branch or NOP.
  * If @validate == true, a replaced instruction is checked against 'old'.
@@ -70,9 +112,6 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
 	if (addr == FTRACE_ADDR)
 		return &plt[FTRACE_PLT_IDX];
-	if (addr == FTRACE_REGS_ADDR &&
-	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
-		return &plt[FTRACE_REGS_PLT_IDX];
 #endif
 	return NULL;
 }
@@ -154,25 +193,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	return ftrace_modify_code(pc, old, new, true);
 }

-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
-		       unsigned long addr)
-{
-	unsigned long pc = rec->ip;
-	u32 old, new;
-
-	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
-		return -EINVAL;
-	if (!ftrace_find_callable_addr(rec, NULL, &addr))
-		return -EINVAL;
-
-	old = aarch64_insn_gen_branch_imm(pc, old_addr,
-					  AARCH64_INSN_BRANCH_LINK);
-	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
-
-	return ftrace_modify_code(pc, old, new, true);
-}
-
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 /*
  * The compiler has inserted two NOPs before the regular function prologue.
  * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
@@ -228,7 +249,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 	 *
 	 * Note: 'mod' is only set at module load time.
 	 */
-	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
+	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) &&
 	    IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
 		return aarch64_insn_patch_text_nosync((void *)pc, new);
 	}
@@ -246,7 +267,6 @@ void arch_ftrace_update_code(int command)
 	command |= FTRACE_MAY_SLEEP;
 	ftrace_modify_all_code(command);
 }
-#endif /* CONFIG_DYNAMIC_FTRACE */

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
@@ -277,21 +297,11 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 	}
 }

-#ifdef CONFIG_DYNAMIC_FTRACE
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
-	/*
-	 * When DYNAMIC_FTRACE_WITH_REGS is selected, `fregs` can never be NULL
-	 * and arch_ftrace_get_regs(fregs) will always give a non-NULL pt_regs
-	 * in which we can safely modify the LR.
-	 */
-	struct pt_regs *regs = arch_ftrace_get_regs(fregs);
-	unsigned long *parent = (unsigned long *)&procedure_link_pointer(regs);
-
-	prepare_ftrace_return(ip, parent, frame_pointer(regs));
+	prepare_ftrace_return(ip, &fregs->lr, fregs->fp);
 }
 #else
 /*
@@ -323,6 +333,5 @@ int ftrace_disable_ftrace_graph_caller(void)
 {
 	return ftrace_modify_graph_caller(false);
 }
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
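
The fregs_offsets[] table and ftrace_regs_query_register_offset() added in the hunk above give consumers a name-to-offset lookup into the new struct ftrace_regs. Purely as an illustration (this helper is not part of the patch), resolving a few names could look like the following; note the table spells the frame pointer and link register as "x29" and "x30"/"lr":

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical helper: print where a few registers live inside ftrace_regs. */
static void demo_fregs_offsets(void)
{
	static const char *const names[] = { "x0", "x8", "x29", "x30", "sp", "pc" };
	int i;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		pr_info("%s -> offset %d in struct ftrace_regs\n",
			names[i], ftrace_regs_query_register_offset(names[i]));
}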
@@ -499,9 +499,6 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
 	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

-	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
-		__init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);
-
 	mod->arch.ftrace_trampolines = plts;
 #endif
 	return 0;
...
@@ -37,12 +37,32 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *
 	return fregs->regs.msr ? &fregs->regs : NULL;
 }

-static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
-							    unsigned long ip)
+static __always_inline void
+ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
+				    unsigned long ip)
 {
 	regs_set_return_ip(&fregs->regs, ip);
 }

+static __always_inline unsigned long
+ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs)
+{
+	return instruction_pointer(&fregs->regs);
+}
+
+#define ftrace_regs_get_argument(fregs, n) \
+	regs_get_kernel_argument(&(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+	kernel_stack_pointer(&(fregs)->regs)
+#define ftrace_regs_return_value(fregs) \
+	regs_return_value(&(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+	regs_set_return_value(&(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+	override_function_with_return(&(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+	regs_query_register_offset(name)
+
 struct ftrace_ops;

 #define ftrace_graph_func ftrace_graph_func
...
@@ -54,12 +54,33 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *
 	return NULL;
 }

-static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
-							    unsigned long ip)
+static __always_inline unsigned long
+ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
+{
+	return fregs->regs.psw.addr;
+}
+
+static __always_inline void
+ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
+				    unsigned long ip)
 {
 	fregs->regs.psw.addr = ip;
 }

+#define ftrace_regs_get_argument(fregs, n) \
+	regs_get_kernel_argument(&(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+	kernel_stack_pointer(&(fregs)->regs)
+#define ftrace_regs_return_value(fregs) \
+	regs_return_value(&(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+	regs_set_return_value(&(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+	override_function_with_return(&(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+	regs_query_register_offset(name)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 /*
  * When an ftrace registered caller is tracing a function that is
  * also set by a register_ftrace_direct() call, it needs to be
@@ -67,10 +88,12 @@ static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *f
  * place the direct caller in the ORIG_GPR2 part of pt_regs. This
  * tells the ftrace_caller that there's a direct caller.
  */
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
 {
+	struct pt_regs *regs = &fregs->regs;
+
 	regs->orig_gpr2 = addr;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

 /*
  * Even though the system call numbers are identical for s390/s390x a
...
@@ -34,19 +34,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 	return addr;
 }

-/*
- * When a ftrace registered caller is tracing a function that is
- * also set by a register_ftrace_direct() call, it needs to be
- * differentiated in the ftrace_caller trampoline. To do this, we
- * place the direct caller in the ORIG_AX part of pt_regs. This
- * tells the ftrace_caller that there's a direct caller.
- */
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
-{
-	/* Emulate a call */
-	regs->orig_ax = addr;
-}
-
 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
 struct ftrace_regs {
 	struct pt_regs regs;
@@ -61,9 +48,25 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
 	return &fregs->regs;
 }

-#define ftrace_instruction_pointer_set(fregs, _ip) \
+#define ftrace_regs_set_instruction_pointer(fregs, _ip) \
 	do { (fregs)->regs.ip = (_ip); } while (0)

+#define ftrace_regs_get_instruction_pointer(fregs) \
+	((fregs)->regs.ip)
+
+#define ftrace_regs_get_argument(fregs, n) \
+	regs_get_kernel_argument(&(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+	kernel_stack_pointer(&(fregs)->regs)
+#define ftrace_regs_return_value(fregs) \
+	regs_return_value(&(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+	regs_set_return_value(&(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+	override_function_with_return(&(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+	regs_query_register_offset(name)
+
 struct ftrace_ops;
 #define ftrace_graph_func ftrace_graph_func
 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
@@ -72,6 +75,24 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 #define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR
 #endif

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+/*
+ * When a ftrace registered caller is tracing a function that is
+ * also set by a register_ftrace_direct() call, it needs to be
+ * differentiated in the ftrace_caller trampoline. To do this, we
+ * place the direct caller in the ORIG_AX part of pt_regs. This
+ * tells the ftrace_caller that there's a direct caller.
+ */
+static inline void
+__arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+{
+	/* Emulate a call */
+	regs->orig_ax = addr;
+}
+#define arch_ftrace_set_direct_caller(fregs, addr) \
+	__arch_ftrace_set_direct_caller(&(fregs)->regs, addr)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 struct dyn_arch_ftrace {
...
@@ -37,9 +37,10 @@ extern void ftrace_boot_snapshot(void);
 static inline void ftrace_boot_snapshot(void) { }
 #endif

-#ifdef CONFIG_FUNCTION_TRACER
 struct ftrace_ops;
 struct ftrace_regs;
+
+#ifdef CONFIG_FUNCTION_TRACER
 /*
  * If the arch's mcount caller does not support all of ftrace's
  * features, then it must call an indirect function that
@@ -110,12 +111,11 @@ struct ftrace_regs {
 #define arch_ftrace_get_regs(fregs) (&(fregs)->regs)

 /*
- * ftrace_instruction_pointer_set() is to be defined by the architecture
- * if to allow setting of the instruction pointer from the ftrace_regs
- * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports
- * live kernel patching.
+ * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
+ * if to allow setting of the instruction pointer from the ftrace_regs when
+ * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
  */
-#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
+#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
 #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

 static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
@@ -126,6 +126,35 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs
 	return arch_ftrace_get_regs(fregs);
 }

+/*
+ * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
+ * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
+ */
+static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
+{
+	if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
+		return true;
+
+	return ftrace_get_regs(fregs) != NULL;
+}
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+#define ftrace_regs_get_instruction_pointer(fregs) \
+	instruction_pointer(ftrace_get_regs(fregs))
+#define ftrace_regs_get_argument(fregs, n) \
+	regs_get_kernel_argument(ftrace_get_regs(fregs), n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+	kernel_stack_pointer(ftrace_get_regs(fregs))
+#define ftrace_regs_return_value(fregs) \
+	regs_return_value(ftrace_get_regs(fregs))
+#define ftrace_regs_set_return_value(fregs, ret) \
+	regs_set_return_value(ftrace_get_regs(fregs), ret)
+#define ftrace_override_function_with_return(fregs) \
+	override_function_with_return(ftrace_get_regs(fregs))
+#define ftrace_regs_query_register_offset(name) \
+	regs_query_register_offset(name)
+#endif
+
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
 			      struct ftrace_ops *op, struct ftrace_regs *fregs);
@@ -427,9 +456,7 @@ static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsi
 {
 	return -ENODEV;
 }
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 /*
  * This must be implemented by the architecture.
  * It is the way the ftrace direct_ops helper, when called
@@ -443,9 +470,9 @@ static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsi
  * the return from the trampoline jump to the direct caller
  * instead of going back to the function it just traced.
  */
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
 						 unsigned long addr) { }
-#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

 #ifdef CONFIG_STACK_TRACER
...
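
The ftrace_regs_has_args() helper and the generic fallback macros added in this hunk let a callback use the same accessors whether the architecture saves a full pt_regs or only the argument state. A hedged sketch, using only helpers visible above (the callback itself is hypothetical):

#include <linux/ftrace.h>
#include <linux/kernel.h>

/*
 * Illustrative only: a callback that is safe on every architecture, whether
 * fregs carries a full pt_regs (WITH_REGS) or just the argument state
 * (WITH_ARGS).
 */
static void notrace generic_callback(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	/* Per the comment above, the accessors may only be used when this is true. */
	if (!ftrace_regs_has_args(fregs))
		return;

	trace_printk("%ps: pc=%lx arg0=%lx sp=%lx\n", (void *)ip,
		     ftrace_regs_get_instruction_pointer(fregs),
		     ftrace_regs_get_argument(fregs, 0),
		     ftrace_regs_get_stack_pointer(fregs));
}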
@@ -118,7 +118,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	if (func->nop)
 		goto unlock;

-	ftrace_instruction_pointer_set(fregs, (unsigned long)func->new_func);
+	ftrace_regs_set_instruction_pointer(fregs, (unsigned long)func->new_func);

 unlock:
 	ftrace_test_recursion_unlock(bit);
...
@@ -46,10 +46,10 @@ config HAVE_DYNAMIC_FTRACE_WITH_ARGS
 	bool
 	help
 	 If this is set, then arguments and stack can be found from
-	 the pt_regs passed into the function callback regs parameter
+	 the ftrace_regs passed into the function callback regs parameter
 	 by default, even without setting the REGS flag in the ftrace_ops.
-	 This allows for use of regs_get_kernel_argument() and
-	 kernel_stack_pointer().
+	 This allows for use of ftrace_regs_get_argument() and
+	 ftrace_regs_get_stack_pointer().

 config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
 	bool
...
@@ -2487,14 +2487,13 @@ ftrace_add_rec_direct(unsigned long ip, unsigned long addr,
 static void call_direct_funcs(unsigned long ip, unsigned long pip,
 			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
-	struct pt_regs *regs = ftrace_get_regs(fregs);
 	unsigned long addr;

 	addr = ftrace_find_rec_direct(ip);
 	if (!addr)
 		return;

-	arch_ftrace_set_direct_caller(regs, addr);
+	arch_ftrace_set_direct_caller(fregs, addr);
 }

 struct ftrace_ops direct_ops = {
...