Commit 32663c78 authored by Linus Torvalds

Merge tag 'trace-v5.9' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:

 - The biggest news is that the tracing ring buffer can now time events
   that interrupted other ring buffer events.

   Before this change, if an interrupt came in while recording another
   event, and that interrupt also had an event, those events would all
   have the same time stamp as the event they interrupted.

   Now, with the new design, those events get unique time stamps that
   correctly show when the events recorded while interrupting another
   event actually occurred.

 - Bootconfig now has an "override" operator that lets users keep a
   default config and then add options that override the default (a
   short example follows this list).

 - A fix was made to properly filter function graph tracing to the
   ftrace PIDs. This came in at the end of the -rc cycle, and needs to
   be backported.

 - Several clean ups, performance updates, and minor fixes as well.
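
   As a quick illustration of the override operator (taken from the
   bootconfig documentation change included in this pull; "foo" is just
   a placeholder key):

     # default bootconfig fragment
     foo = bar, baz

     # custom fragment appended later; ":=" replaces the earlier value
     foo := qux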

* tag 'trace-v5.9' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (39 commits)
  tracing: Add trace_array_init_printk() to initialize instance trace_printk() buffers
  kprobes: Fix compiler warning for !CONFIG_KPROBES_ON_FTRACE
  tracing: Use trace_sched_process_free() instead of exit() for pid tracing
  bootconfig: Fix to find the initargs correctly
  Documentation: bootconfig: Add bootconfig override operator
  tools/bootconfig: Add testcases for value override operator
  lib/bootconfig: Add override operator support
  kprobes: Remove show_registers() function prototype
  tracing/uprobe: Remove dead code in trace_uprobe_register()
  kprobes: Fix NULL pointer dereference at kprobe_ftrace_handler
  ftrace: Fix ftrace_trace_task return value
  tracepoint: Use __used attribute definitions from compiler_attributes.h
  tracepoint: Mark __tracepoint_string's __used
  trace : Have tracing buffer info use kvzalloc instead of kzalloc
  tracing: Remove outdated comment in stack handling
  ftrace: Do not let direct or IPMODIFY ftrace_ops be added to module and set trampolines
  ftrace: Setup correct FTRACE_FL_REGS flags for module
  tracing/hwlat: Honor the tracing_cpumask
  tracing/hwlat: Drop the duplicate assignment in start_kthread()
  tracing: Save one trace_event->type by using __TRACE_LAST_TYPE
  ...
parents 7b9de977 38ce2a9e
@@ -71,6 +71,16 @@ For example,::
   foo = bar, baz
   foo = qux  # !ERROR! we can not re-define same key
+If you want to update the value, you must use the override operator
+``:=`` explicitly. For example::
+
+  foo = bar, baz
+  foo := qux
+
+then, the ``qux`` is assigned to ``foo`` key. This is useful for
+overriding the default value by adding (partial) custom bootconfigs
+without parsing the default bootconfig.
 If you want to append the value to existing key as an array member,
 you can use ``+=`` operator. For example::
@@ -84,6 +94,7 @@ For example, following config is NOT allowed.::
   foo = value1
   foo.bar = value2 # !ERROR! subkey "bar" and value "value1" can NOT co-exist
+  foo.bar := value2 # !ERROR! even with the override operator, this is NOT allowed.
 Comments
......
@@ -286,6 +286,7 @@ extern void ftrace_regs_caller_ret(void);
 extern void ftrace_caller_end(void);
 extern void ftrace_caller_op_ptr(void);
 extern void ftrace_regs_caller_op_ptr(void);
+extern void ftrace_regs_caller_jmp(void);
 /* movq function_trace_op(%rip), %rdx */
 /* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
@@ -316,6 +317,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
     unsigned long end_offset;
     unsigned long op_offset;
     unsigned long call_offset;
+    unsigned long jmp_offset;
     unsigned long offset;
     unsigned long npages;
     unsigned long size;
@@ -333,11 +335,13 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
         end_offset = (unsigned long)ftrace_regs_caller_end;
         op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
         call_offset = (unsigned long)ftrace_regs_call;
+        jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
     } else {
         start_offset = (unsigned long)ftrace_caller;
         end_offset = (unsigned long)ftrace_caller_end;
         op_offset = (unsigned long)ftrace_caller_op_ptr;
         call_offset = (unsigned long)ftrace_call;
+        jmp_offset = 0;
     }
     size = end_offset - start_offset;
@@ -367,10 +371,14 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
     if (WARN_ON(ret < 0))
         goto fail;
+    /* No need to test direct calls on created trampolines */
     if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
-        ip = trampoline + (ftrace_regs_caller_ret - ftrace_regs_caller);
-        ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
-        if (WARN_ON(ret < 0))
+        /* NOP the jnz 1f; but make sure it's a 2 byte jnz */
+        ip = trampoline + (jmp_offset - start_offset);
+        if (WARN_ON(*(char *)ip != 0x75))
+            goto fail;
+        ret = copy_from_kernel_nofault(ip, ideal_nops[2], 2);
+        if (ret < 0)
             goto fail;
     }
......
@@ -241,22 +241,10 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
      */
     movq ORIG_RAX(%rsp), %rax
     testq %rax, %rax
-    jz 1f
+SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
+    jnz 1f
-    /* Swap the flags with orig_rax */
-    movq MCOUNT_REG_SIZE(%rsp), %rdi
-    movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
-    movq %rax, MCOUNT_REG_SIZE(%rsp)
-    restore_mcount_regs 8
-    /* Restore flags */
-    popfq
-SYM_INNER_LABEL(ftrace_regs_caller_ret, SYM_L_GLOBAL);
-    UNWIND_HINT_RET_OFFSET
-    jmp ftrace_epilogue
-1:  restore_mcount_regs
+    restore_mcount_regs
     /* Restore flags */
     popfq
@@ -269,6 +257,17 @@ SYM_INNER_LABEL(ftrace_regs_caller_ret, SYM_L_GLOBAL);
 SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
     jmp ftrace_epilogue
+/* Swap the flags with orig_rax */
+1:  movq MCOUNT_REG_SIZE(%rsp), %rdi
+    movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
+    movq %rax, MCOUNT_REG_SIZE(%rsp)
+    restore_mcount_regs 8
+    /* Restore flags */
+    popfq
+    UNWIND_HINT_RET_OFFSET
+    jmp ftrace_epilogue
 SYM_FUNC_END(ftrace_regs_caller)
......
@@ -227,7 +227,6 @@ extern int arch_prepare_kprobe(struct kprobe *p);
 extern void arch_arm_kprobe(struct kprobe *p);
 extern void arch_disarm_kprobe(struct kprobe *p);
 extern int arch_init_kprobes(void);
-extern void show_registers(struct pt_regs *regs);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 extern bool arch_within_kprobe_blacklist(unsigned long addr);
 extern int arch_populate_kprobe_blacklist(void);
......
@@ -143,6 +143,7 @@ bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
 void ring_buffer_reset(struct trace_buffer *buffer);
 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
......
@@ -29,6 +29,7 @@ struct trace_array;
 void trace_printk_init_buffers(void);
 int trace_array_printk(struct trace_array *tr, unsigned long ip,
         const char *fmt, ...);
+int trace_array_init_printk(struct trace_array *tr);
 void trace_array_put(struct trace_array *tr);
 struct trace_array *trace_array_get_by_name(const char *name);
 int trace_array_destroy(struct trace_array *tr);
......
@@ -116,8 +116,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 #define __TRACEPOINT_ENTRY(name) \
     static tracepoint_ptr_t __tracepoint_ptr_##name __used \
-    __attribute__((section("__tracepoints_ptrs"))) = \
-        &__tracepoint_##name
+    __section(__tracepoints_ptrs) = &__tracepoint_##name
 #endif
 #endif /* _LINUX_TRACEPOINT_H */
@@ -280,9 +279,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  */
 #define DEFINE_TRACE_FN(name, reg, unreg) \
     static const char __tpstrtab_##name[] \
-    __attribute__((section("__tracepoints_strings"))) = #name; \
-    struct tracepoint __tracepoint_##name \
-    __attribute__((section("__tracepoints"), used)) = \
+    __section(__tracepoints_strings) = #name; \
+    struct tracepoint __tracepoint_##name __used \
+    __section(__tracepoints) = \
     { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
     __TRACEPOINT_ENTRY(name);
@@ -361,7 +360,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
     static const char *___tp_str __tracepoint_string = str; \
     ___tp_str; \
 })
-#define __tracepoint_string __attribute__((section("__tracepoint_str")))
+#define __tracepoint_string __used __section(__tracepoint_str)
 #else
 /*
  * tracepoint_string() is used to save the string address for userspace
......
@@ -210,8 +210,7 @@ TRACE_MAKE_SYSTEM_STR();
 #define DEFINE_EVENT(template, name, proto, args)
 #undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-    DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
 #undef TRACE_EVENT_FLAGS
 #define TRACE_EVENT_FLAGS(event, flag)
@@ -443,12 +442,8 @@ static struct trace_event_fields trace_event_fields_##call[] = { \
     tstruct \
     {} };
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
 #undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-    DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -523,13 +518,6 @@ static inline notrace int trace_event_get_offsets_##call( \
     return __data_size; \
 }
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-    DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 /*
@@ -721,9 +709,6 @@ static inline void ftrace_test_probe_##call(void) \
     check_trace_callback_type_##call(trace_event_raw_event_##template); \
 }
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 #undef __entry
......
@@ -388,8 +388,6 @@ static int __init bootconfig_params(char *param, char *val,
 {
     if (strcmp(param, "bootconfig") == 0) {
         bootconfig_found = true;
-    } else if (strcmp(param, "--") == 0) {
-        initargs_found = true;
     }
     return 0;
 }
@@ -400,19 +398,23 @@ static void __init setup_boot_config(const char *cmdline)
     const char *msg;
     int pos;
     u32 size, csum;
-    char *data, *copy;
+    char *data, *copy, *err;
     int ret;
     /* Cut out the bootconfig data even if we have no bootconfig option */
     data = get_boot_config_from_initrd(&size, &csum);
     strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-    parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
-           bootconfig_params);
+    err = parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
+             bootconfig_params);
-    if (!bootconfig_found)
+    if (IS_ERR(err) || !bootconfig_found)
         return;
+    /* parse_args() stops at '--' and returns an address */
+    if (err)
+        initargs_found = true;
+
     if (!data) {
         pr_err("'bootconfig' found on command line, but no bootconfig found\n");
         return;
......
@@ -1111,9 +1111,20 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
         ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
 }
 #else	/* !CONFIG_KPROBES_ON_FTRACE */
-#define prepare_kprobe(p) arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p) (-ENODEV)
-#define disarm_kprobe_ftrace(p) (-ENODEV)
+static inline int prepare_kprobe(struct kprobe *p)
+{
+    return arch_prepare_kprobe(p);
+}
+
+static inline int arm_kprobe_ftrace(struct kprobe *p)
+{
+    return -ENODEV;
+}
+
+static inline int disarm_kprobe_ftrace(struct kprobe *p)
+{
+    return -ENODEV;
+}
 #endif
 /* Arm a kprobe with text_mutex */
@@ -2145,6 +2156,13 @@ static void kill_kprobe(struct kprobe *p)
      * the original probed function (which will be freed soon) any more.
      */
     arch_remove_kprobe(p);
+
+    /*
+     * The module is going away. We should disarm the kprobe which
+     * is using ftrace.
+     */
+    if (kprobe_ftrace(p))
+        disarm_kprobe_ftrace(p);
 }
 /* Disable one kprobe */
......
@@ -139,9 +139,6 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 #endif
 }
-#define FTRACE_PID_IGNORE -1
-#define FTRACE_PID_TRACE -2
-
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *regs)
 {
@@ -2388,6 +2385,14 @@ struct ftrace_ops direct_ops = {
     .flags = FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
           | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
           | FTRACE_OPS_FL_PERMANENT,
+    /*
+     * By declaring the main trampoline as this trampoline
+     * it will never have one allocated for it. Allocated
+     * trampolines should not call direct functions.
+     * The direct_ops should only be called by the builtin
+     * ftrace_regs_caller trampoline.
+     */
+    .trampoline = FTRACE_REGS_ADDR,
 };
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
@@ -6255,8 +6260,19 @@ static int referenced_filters(struct dyn_ftrace *rec)
     int cnt = 0;
     for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
-        if (ops_references_rec(ops, rec))
-            cnt++;
+        if (ops_references_rec(ops, rec)) {
+            if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
+                continue;
+            if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+                continue;
+            cnt++;
+            if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+                rec->flags |= FTRACE_FL_REGS;
+            if (cnt == 1 && ops->trampoline)
+                rec->flags |= FTRACE_FL_TRAMP;
+            else
+                rec->flags &= ~FTRACE_FL_TRAMP;
+        }
     }
     return cnt;
@@ -6435,8 +6451,8 @@ void ftrace_module_enable(struct module *mod)
         if (ftrace_start_up)
             cnt += referenced_filters(rec);
-        /* This clears FTRACE_FL_DISABLED */
-        rec->flags = cnt;
+        rec->flags &= ~FTRACE_FL_DISABLED;
+        rec->flags += cnt;
         if (ftrace_start_up && cnt) {
             int failed = __ftrace_replace_code(rec, 1);
@@ -7066,12 +7082,12 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
     if (enable) {
         register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
                           tr);
-        register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
+        register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
                           tr);
     } else {
         unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
                             tr);
-        unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
+        unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
                             tr);
     }
 }
......
@@ -2002,7 +2002,6 @@ static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
 void tracing_reset_online_cpus(struct array_buffer *buf)
 {
     struct trace_buffer *buffer = buf->buffer;
-    int cpu;
     if (!buffer)
         return;
@@ -2014,8 +2013,7 @@ void tracing_reset_online_cpus(struct array_buffer *buf)
     buf->time_start = buffer_ftrace_now(buf, buf->cpu);
-    for_each_online_cpu(cpu)
-        ring_buffer_reset_cpu(buffer, cpu);
+    ring_buffer_reset_online_cpus(buffer);
     ring_buffer_record_enable(buffer);
 }
@@ -2931,12 +2929,6 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
         skip++;
 #endif
-    /*
-     * Since events can happen in NMIs there's no safe way to
-     * use the per cpu ftrace_stacks. We reserve it and if an interrupt
-     * or NMI comes in, it will just have to use the default
-     * FTRACE_STACK_SIZE.
-     */
     preempt_disable_notrace();
     stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
@@ -3136,6 +3128,9 @@ static int alloc_percpu_trace_buffer(void)
 {
     struct trace_buffer_struct *buffers;
+    if (trace_percpu_buffer)
+        return 0;
+
     buffers = alloc_percpu(struct trace_buffer_struct);
     if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
         return -ENOMEM;
@@ -3338,6 +3333,26 @@ int trace_array_vprintk(struct trace_array *tr,
     return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
 }
+/**
+ * trace_array_printk - Print a message to a specific instance
+ * @tr: The instance trace_array descriptor
+ * @ip: The instruction pointer that this is called from.
+ * @fmt: The format to print (printf format)
+ *
+ * If a subsystem sets up its own instance, they have the right to
+ * printk strings into their tracing instance buffer using this
+ * function. Note, this function will not write into the top level
+ * buffer (use trace_printk() for that), as writing into the top level
+ * buffer should only have events that can be individually disabled.
+ * trace_printk() is only used for debugging a kernel, and should not
+ * be ever encorporated in normal use.
+ *
+ * trace_array_printk() can be used, as it will not add noise to the
+ * top level tracing buffer.
+ *
+ * Note, trace_array_init_printk() must be called on @tr before this
+ * can be used.
+ */
 __printf(3, 0)
 int trace_array_printk(struct trace_array *tr,
                unsigned long ip, const char *fmt, ...)
@@ -3345,12 +3360,16 @@ int trace_array_printk(struct trace_array *tr,
     int ret;
     va_list ap;
-    if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
-        return 0;
-
     if (!tr)
         return -ENOENT;
+    /* This is only allowed for created instances */
+    if (tr == &global_trace)
+        return 0;
+
+    if (!(tr->trace_flags & TRACE_ITER_PRINTK))
+        return 0;
+
     va_start(ap, fmt);
     ret = trace_array_vprintk(tr, ip, fmt, ap);
     va_end(ap);
@@ -3358,6 +3377,27 @@ int trace_array_printk(struct trace_array *tr,
 }
 EXPORT_SYMBOL_GPL(trace_array_printk);
+/**
+ * trace_array_init_printk - Initialize buffers for trace_array_printk()
+ * @tr: The trace array to initialize the buffers for
+ *
+ * As trace_array_printk() only writes into instances, they are OK to
+ * have in the kernel (unlike trace_printk()). This needs to be called
+ * before trace_array_printk() can be used on a trace_array.
+ */
+int trace_array_init_printk(struct trace_array *tr)
+{
+    if (!tr)
+        return -ENOENT;
+
+    /* This is only allowed for created instances */
+    if (tr == &global_trace)
+        return -EINVAL;
+
+    return alloc_percpu_trace_buffer();
+}
+EXPORT_SYMBOL_GPL(trace_array_init_printk);
+
 __printf(3, 4)
 int trace_array_printk_buf(struct trace_buffer *buffer,
                unsigned long ip, const char *fmt, ...)
@@ -5886,7 +5926,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
     }
     /* If trace pipe files are being read, we can't change the tracer */
-    if (tr->current_trace->ref) {
+    if (tr->trace_ref) {
         ret = -EBUSY;
         goto out;
     }
@@ -6102,7 +6142,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
     nonseekable_open(inode, filp);
-    tr->current_trace->ref++;
+    tr->trace_ref++;
 out:
     mutex_unlock(&trace_types_lock);
     return ret;
@@ -6121,7 +6161,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
     mutex_lock(&trace_types_lock);
-    tr->current_trace->ref--;
+    tr->trace_ref--;
     if (iter->trace->pipe_close)
         iter->trace->pipe_close(iter);
@@ -7405,7 +7445,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
     if (ret)
         return ret;
-    info = kzalloc(sizeof(*info), GFP_KERNEL);
+    info = kvzalloc(sizeof(*info), GFP_KERNEL);
     if (!info) {
         trace_array_put(tr);
         return -ENOMEM;
@@ -7423,7 +7463,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
     filp->private_data = info;
-    tr->current_trace->ref++;
+    tr->trace_ref++;
     mutex_unlock(&trace_types_lock);
@@ -7524,14 +7564,14 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
     mutex_lock(&trace_types_lock);
-    iter->tr->current_trace->ref--;
+    iter->tr->trace_ref--;
     __trace_array_put(iter->tr);
     if (info->spare)
         ring_buffer_free_read_page(iter->array_buffer->buffer,
                        info->spare_cpu, info->spare);
-    kfree(info);
+    kvfree(info);
     mutex_unlock(&trace_types_lock);
@@ -8732,7 +8772,7 @@ static int __remove_instance(struct trace_array *tr)
     int i;
     /* Reference counter for a newly created trace array = 1. */
-    if (tr->ref > 1 || (tr->current_trace && tr->current_trace->ref))
+    if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
         return -EBUSY;
     list_del(&tr->list);
......
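
For context, here is a minimal sketch of how a subsystem module might use the
new trace_array_init_printk()/trace_array_printk() pair added in the hunks
above. The instance name "my_subsys" and the module boilerplate are
illustrative assumptions, not part of this merge:

    #include <linux/module.h>
    #include <linux/trace.h>

    static struct trace_array *tr;

    static int __init my_subsys_init(void)
    {
            /* Create (or look up) a dedicated tracing instance. */
            tr = trace_array_get_by_name("my_subsys");
            if (!tr)
                    return -ENOMEM;

            /* Must be called before trace_array_printk() can write to this instance. */
            if (trace_array_init_printk(tr)) {
                    trace_array_put(tr);
                    return -EINVAL;
            }

            trace_array_printk(tr, _THIS_IP_, "my_subsys initialized\n");
            return 0;
    }

    static void __exit my_subsys_exit(void)
    {
            trace_array_put(tr);
    }

    module_init(my_subsys_init);
    module_exit(my_subsys_exit);
    MODULE_LICENSE("GPL");
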
@@ -356,6 +356,7 @@ struct trace_array {
     struct trace_event_file *trace_marker_file;
     cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
     int ref;
+    int trace_ref;
 #ifdef CONFIG_FUNCTION_TRACER
     struct ftrace_ops *ops;
     struct trace_pid_list __rcu *function_pids;
@@ -547,7 +548,6 @@ struct tracer {
     struct tracer *next;
     struct tracer_flags *flags;
     int enabled;
-    int ref;
     bool print_max;
     bool allow_instances;
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -1103,6 +1103,10 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 extern struct list_head ftrace_pids;
 #ifdef CONFIG_FUNCTION_TRACER
+#define FTRACE_PID_IGNORE -1
+#define FTRACE_PID_TRACE -2
 struct ftrace_func_command {
     struct list_head list;
     char *name;
@@ -1114,7 +1118,8 @@ struct ftrace_func_command {
 extern bool ftrace_filter_param __initdata;
 static inline int ftrace_trace_task(struct trace_array *tr)
 {
-    return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
+    return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
+        FTRACE_PID_IGNORE;
 }
 extern int ftrace_is_dead(void);
 int ftrace_create_function_files(struct trace_array *tr,
......
@@ -538,12 +538,12 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable)
     if (enable) {
         register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
                                tr, INT_MIN);
-        register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
+        register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
                                tr, INT_MAX);
     } else {
         unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
                             tr);
-        unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
+        unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
                             tr);
     }
 }
......
@@ -283,6 +283,7 @@ static bool disable_migrate;
 static void move_to_next_cpu(void)
 {
     struct cpumask *current_mask = &save_cpumask;
+    struct trace_array *tr = hwlat_trace;
     int next_cpu;
     if (disable_migrate)
@@ -296,7 +297,7 @@ static void move_to_next_cpu(void)
         goto disable;
     get_online_cpus();
-    cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+    cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
     next_cpu = cpumask_next(smp_processor_id(), current_mask);
     put_online_cpus();
@@ -371,9 +372,8 @@ static int start_kthread(struct trace_array *tr)
         return 0;
     /* Just pick the first CPU on first iteration */
-    current_mask = &save_cpumask;
     get_online_cpus();
-    cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+    cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
     put_online_cpus();
     next_cpu = cpumask_first(current_mask);
......
@@ -20,7 +20,7 @@ DECLARE_RWSEM(trace_event_sem);
 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
-static int next_event_type = __TRACE_LAST_TYPE + 1;
+static int next_event_type = __TRACE_LAST_TYPE;
 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
 {
@@ -675,11 +675,11 @@ static LIST_HEAD(ftrace_event_list);
 static int trace_search_list(struct list_head **list)
 {
     struct trace_event *e;
-    int last = __TRACE_LAST_TYPE;
+    int next = __TRACE_LAST_TYPE;
     if (list_empty(&ftrace_event_list)) {
         *list = &ftrace_event_list;
-        return last + 1;
+        return next;
     }
     /*
@@ -687,17 +687,17 @@ static int trace_search_list(struct list_head **list)
      * lets see if somebody freed one.
      */
     list_for_each_entry(e, &ftrace_event_list, list) {
-        if (e->type != last + 1)
+        if (e->type != next)
             break;
-        last++;
+        next++;
     }
     /* Did we used up all 65 thousand events??? */
-    if ((last + 1) > TRACE_EVENT_TYPE_MAX)
+    if (next > TRACE_EVENT_TYPE_MAX)
         return 0;
     *list = &e->list;
-    return last + 1;
+    return next;
 }
 void trace_event_read_lock(void)
......
@@ -1456,7 +1456,6 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
     default:
         return 0;
     }
-    return 0;
 }
 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
......
@@ -329,22 +329,30 @@ const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
 /* XBC parse and tree build */
+static int __init xbc_init_node(struct xbc_node *node, char *data, u32 flag)
+{
+    unsigned long offset = data - xbc_data;
+
+    if (WARN_ON(offset >= XBC_DATA_MAX))
+        return -EINVAL;
+
+    node->data = (u16)offset | flag;
+    node->child = 0;
+    node->next = 0;
+
+    return 0;
+}
+
 static struct xbc_node * __init xbc_add_node(char *data, u32 flag)
 {
     struct xbc_node *node;
-    unsigned long offset;
     if (xbc_node_num == XBC_NODE_MAX)
         return NULL;
     node = &xbc_nodes[xbc_node_num++];
-    offset = data - xbc_data;
-    node->data = (u16)offset;
-    if (WARN_ON(offset >= XBC_DATA_MAX))
+    if (xbc_init_node(node, data, flag) < 0)
         return NULL;
-    node->data |= flag;
-    node->child = 0;
-    node->next = 0;
     return node;
 }
@@ -603,7 +611,9 @@ static int __init xbc_parse_kv(char **k, char *v, int op)
     if (c < 0)
         return c;
-    if (!xbc_add_sibling(v, XBC_VALUE))
+    if (op == ':' && child) {
+        xbc_init_node(child, v, XBC_VALUE);
+    } else if (!xbc_add_sibling(v, XBC_VALUE))
         return -ENOMEM;
     if (c == ',') { /* Array */
@@ -787,7 +797,7 @@ int __init xbc_init(char *buf, const char **emsg, int *epos)
     p = buf;
     do {
-        q = strpbrk(p, "{}=+;\n#");
+        q = strpbrk(p, "{}=+;:\n#");
         if (!q) {
             p = skip_spaces(p);
             if (*p != '\0')
@@ -798,9 +808,12 @@ int __init xbc_init(char *buf, const char **emsg, int *epos)
         c = *q;
         *q++ = '\0';
         switch (c) {
+        case ':':
         case '+':
             if (*q++ != '=') {
-                ret = xbc_parse_error("Wrong '+' operator",
+                ret = xbc_parse_error(c == '+' ?
+                        "Wrong '+' operator" :
+                        "Wrong ':' operator",
                         q - 2);
                 break;
             }
......
key.subkey = value
# We can not override pre-defined subkeys with value
key := value
key = value
# We can not override pre-defined value with subkey
key.subkey := value
# Override the value
key.word = 1,2,4
key.word := 2,3
# No pre-defined key
key.new.word := "new"
@@ -117,6 +117,19 @@ xpass grep -q "bar" $OUTFILE
 xpass grep -q "baz" $OUTFILE
 xpass grep -q "qux" $OUTFILE
+echo "Override same-key values"
+cat > $TEMPCONF << EOF
+key = bar, baz
+key := qux
+EOF
+echo > $INITRD
+
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+$BOOTCONF $INITRD > $OUTFILE
+xfail grep -q "bar" $OUTFILE
+xfail grep -q "baz" $OUTFILE
+xpass grep -q "qux" $OUTFILE
+
 echo "Double/single quotes test"
 echo "key = '\"string\"';" > $TEMPCONF
 $BOOTCONF -a $TEMPCONF $INITRD
......