Commit c5617b20 authored by Linus Torvalds

Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (61 commits)
  tracing: Add __used annotation to event variable
  perf, trace: Fix !x86 build bug
  perf report: Support multiple events on the TUI
  perf annotate: Fix up usage of the build id cache
  x86/mmiotrace: Remove redundant instruction prefix checks
  perf annotate: Add TUI interface
  perf tui: Remove annotate from popup menu after failure
  perf report: Don't start the TUI if -D is used
  perf: Fix getline undeclared
  perf: Optimize perf_tp_event_match()
  perf: Remove more code from the fastpath
  perf: Optimize the !vmalloc backed buffer
  perf: Optimize perf_output_copy()
  perf: Fix wakeup storm for RO mmap()s
  perf-record: Share per-cpu buffers
  perf-record: Remove -M
  perf: Ensure that IOC_OUTPUT isn't used to create multi-writer buffers
  perf, trace: Optimize tracepoints by using per-tracepoint-per-cpu hlist to track events
  perf, trace: Optimize tracepoints by removing IRQ-disable from perf/tracepoint interaction
  perf tui: Allow disabling the TUI on a per command basis in ~/.perfconfig
  ...
parents cad719d8 49c17746
......@@ -92,6 +92,8 @@ struct cpu_hw_events {
/* Enabled/disable state. */
int enabled;
unsigned int group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
......@@ -981,53 +983,6 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
static void event_sched_in(struct perf_event *event)
{
event->state = PERF_EVENT_STATE_ACTIVE;
event->oncpu = smp_processor_id();
event->tstamp_running += event->ctx->time - event->tstamp_stopped;
if (is_software_event(event))
event->pmu->enable(event);
}
int hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_event *sub;
int n0, n;
if (!sparc_pmu)
return 0;
n0 = cpuc->n_events;
n = collect_events(group_leader, perf_max_events - n0,
&cpuc->event[n0], &cpuc->events[n0],
&cpuc->current_idx[n0]);
if (n < 0)
return -EAGAIN;
if (check_excludes(cpuc->event, n0, n))
return -EINVAL;
if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
return -EAGAIN;
cpuc->n_events = n0 + n;
cpuc->n_added += n;
cpuctx->active_oncpu += n;
n = 1;
event_sched_in(group_leader);
list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
if (sub->state != PERF_EVENT_STATE_OFF) {
event_sched_in(sub);
n++;
}
}
ctx->nr_active += n;
return 1;
}
static int sparc_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
......@@ -1045,11 +1000,20 @@ static int sparc_pmu_enable(struct perf_event *event)
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
/*
* If group events scheduling transaction was started,
 * skip the schedulability test here, it will be performed
 * at commit time (->commit_txn) as a whole
*/
if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
goto nocheck;
if (check_excludes(cpuc->event, n0, 1))
goto out;
if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
goto out;
nocheck:
cpuc->n_events++;
cpuc->n_added++;
......@@ -1129,11 +1093,61 @@ static int __hw_perf_event_init(struct perf_event *event)
return 0;
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
static void sparc_pmu_start_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
static void sparc_pmu_cancel_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 if success
*/
static int sparc_pmu_commit_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n;
if (!sparc_pmu)
return -EINVAL;
cpuc = &__get_cpu_var(cpu_hw_events);
n = cpuc->n_events;
if (check_excludes(cpuc->event, 0, n))
return -EINVAL;
if (sparc_check_constraints(cpuc->event, cpuc->events, n))
return -EAGAIN;
return 0;
}
static const struct pmu pmu = {
.enable = sparc_pmu_enable,
.disable = sparc_pmu_disable,
.read = sparc_pmu_read,
.unthrottle = sparc_pmu_unthrottle,
.start_txn = sparc_pmu_start_txn,
.cancel_txn = sparc_pmu_cancel_txn,
.commit_txn = sparc_pmu_commit_txn,
};
const struct pmu *hw_perf_event_init(struct perf_event *event)
......
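The sparc change above swaps the open-coded hw_perf_group_sched_in() for the new pmu transaction hooks: the core starts a transaction, adds each group member through pmu->enable() without a per-event constraint check, then validates the whole group once in commit_txn(). A minimal sketch of that calling pattern on the generic side (names and error handling are illustrative, not the actual kernel code):

static int group_sched_in_sketch(struct perf_event *leader,
				 const struct pmu *pmu)
{
	struct perf_event *sub;

	pmu->start_txn(pmu);		/* defer the schedulability test */

	if (pmu->enable(leader))	/* no per-event constraint check */
		goto cancel;

	list_for_each_entry(sub, &leader->sibling_list, group_entry)
		if (pmu->enable(sub))
			goto cancel;

	if (!pmu->commit_txn(pmu))	/* one group-wide test at commit */
		return 0;
cancel:
	pmu->cancel_txn(pmu);		/* clear PERF_EVENT_TXN_STARTED */
	return -EAGAIN;
}

On sparc this removes the duplicated group walk: with PERF_EVENT_TXN_STARTED set, sparc_pmu_enable() only collects the event and leaves the check_excludes()/sparc_check_constraints() pass to sparc_pmu_commit_txn().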
......@@ -89,7 +89,8 @@
P4_CCCR_ENABLE)
/* HT mask */
#define P4_CCCR_MASK_HT (P4_CCCR_MASK | P4_CCCR_THREAD_ANY)
#define P4_CCCR_MASK_HT \
(P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
#define P4_GEN_ESCR_EMASK(class, name, bit) \
class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
......
......@@ -1717,7 +1717,11 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
*/
regs->bp = rewind_frame_pointer(skip + 1);
regs->cs = __KERNEL_CS;
local_save_flags(regs->flags);
/*
* We abuse bit 3 to pass exact information, see perf_misc_flags
* and the comment with PERF_EFLAGS_EXACT.
*/
regs->flags = 0;
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
......
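Zeroing regs->flags here is what lets the PMI code mark precise samples: rather than the live EFLAGS, a single bit is reused to say "this IP is exact", which perf_misc_flags() then folds into the sample's misc field. A rough sketch of the consumer side; PERF_EFLAGS_EXACT and the misc flag name are assumptions based on the comment above and this kernel's asm/perf_event.h, not part of this diff:

static unsigned long misc_flags_sketch(struct pt_regs *regs)
{
	unsigned long misc = user_mode(regs) ? PERF_RECORD_MISC_USER
					     : PERF_RECORD_MISC_KERNEL;

	/* Assumption: PERF_EFLAGS_EXACT is the "bit 3" the comment refers to. */
	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT;

	return misc;
}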
......@@ -465,15 +465,21 @@ static int p4_hw_config(struct perf_event *event)
return rc;
}
static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
unsigned long dummy;
int overflow = 0;
u32 low, high;
rdmsrl(hwc->config_base + hwc->idx, dummy);
if (dummy & P4_CCCR_OVF) {
rdmsr(hwc->config_base + hwc->idx, low, high);
/* we need to check high bit for unflagged overflows */
if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
overflow = 1;
(void)checking_wrmsrl(hwc->config_base + hwc->idx,
((u64)dummy) & ~P4_CCCR_OVF);
((u64)low) & ~P4_CCCR_OVF);
}
return overflow;
}
static inline void p4_pmu_disable_event(struct perf_event *event)
......@@ -584,21 +590,15 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
WARN_ON_ONCE(hwc->idx != idx);
/*
* FIXME: Redundant call, actually not needed
* but just to check if we're screwed
*/
p4_pmu_clear_cccr_ovf(hwc);
/* it might be unflagged overflow */
handled = p4_pmu_clear_cccr_ovf(hwc);
val = x86_perf_event_update(event);
if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
continue;
/*
* event overflow
*/
handled = 1;
data.period = event->hw.last_period;
/* event overflow for sure */
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
continue;
......@@ -670,7 +670,7 @@ static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
/*
* ESCR address hashing is tricky, ESCRs are not sequential
* in memory but all starts from MSR_P4_BSU_ESCR0 (0x03e0) and
* in memory but all starts from MSR_P4_BSU_ESCR0 (0x03a0) and
* the metric between any ESCRs is laid in range [0xa0,0xe1]
*
* so we make ~70% filled hashtable
......@@ -735,8 +735,9 @@ static int p4_get_escr_idx(unsigned int addr)
{
unsigned int idx = P4_ESCR_MSR_IDX(addr);
if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
!p4_escr_table[idx])) {
if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
!p4_escr_table[idx] ||
p4_escr_table[idx] != addr)) {
WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
return -1;
}
......@@ -762,7 +763,7 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
{
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
int cpu = raw_smp_processor_id();
int cpu = smp_processor_id();
struct hw_perf_event *hwc;
struct p4_event_bind *bind;
unsigned int i, thread, num;
......
......@@ -34,7 +34,7 @@
/* IA32 Manual 3, 2-1 */
static unsigned char prefix_codes[] = {
0xF0, 0xF2, 0xF3, 0x2E, 0x36, 0x3E, 0x26, 0x64,
0x65, 0x2E, 0x3E, 0x66, 0x67
0x65, 0x66, 0x67
};
/* IA32 Manual 3, 3-432*/
static unsigned int reg_rop[] = {
......
......@@ -73,18 +73,25 @@ struct trace_iterator {
};
struct trace_event;
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
int flags);
struct trace_event {
struct hlist_node node;
struct list_head list;
int type;
int flags, struct trace_event *event);
struct trace_event_functions {
trace_print_func trace;
trace_print_func raw;
trace_print_func hex;
trace_print_func binary;
};
struct trace_event {
struct hlist_node node;
struct list_head list;
int type;
struct trace_event_functions *funcs;
};
extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);
......@@ -116,28 +123,70 @@ void tracing_record_cmdline(struct task_struct *tsk);
struct event_filter;
enum trace_reg {
TRACE_REG_REGISTER,
TRACE_REG_UNREGISTER,
TRACE_REG_PERF_REGISTER,
TRACE_REG_PERF_UNREGISTER,
};
struct ftrace_event_call;
struct ftrace_event_class {
char *system;
void *probe;
#ifdef CONFIG_PERF_EVENTS
void *perf_probe;
#endif
int (*reg)(struct ftrace_event_call *event,
enum trace_reg type);
int (*define_fields)(struct ftrace_event_call *);
struct list_head *(*get_fields)(struct ftrace_event_call *);
struct list_head fields;
int (*raw_init)(struct ftrace_event_call *);
};
enum {
TRACE_EVENT_FL_ENABLED_BIT,
TRACE_EVENT_FL_FILTERED_BIT,
};
enum {
TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
};
struct ftrace_event_call {
struct list_head list;
struct ftrace_event_class *class;
char *name;
char *system;
struct dentry *dir;
struct trace_event *event;
int enabled;
int (*regfunc)(struct ftrace_event_call *);
void (*unregfunc)(struct ftrace_event_call *);
int id;
struct trace_event event;
const char *print_fmt;
int (*raw_init)(struct ftrace_event_call *);
int (*define_fields)(struct ftrace_event_call *);
struct list_head fields;
int filter_active;
struct event_filter *filter;
void *mod;
void *data;
/*
* 32 bit flags:
* bit 1: enabled
* bit 2: filter_active
*
* Changes to flags must hold the event_mutex.
*
* Note: Reads of flags do not hold the event_mutex since
* they occur in critical sections. But the way flags
 * is currently used, these changes do not affect the code
* except that when a change is made, it may have a slight
* delay in propagating the changes to other CPUs due to
* caching and such.
*/
unsigned int flags;
#ifdef CONFIG_PERF_EVENTS
int perf_refcount;
int (*perf_event_enable)(struct ftrace_event_call *);
void (*perf_event_disable)(struct ftrace_event_call *);
struct hlist_head *perf_events;
#endif
};
#define PERF_MAX_TRACE_SIZE 2048
......@@ -194,24 +243,22 @@ struct perf_event;
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
extern int perf_trace_enable(int event_id);
extern void perf_trace_disable(int event_id);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int perf_trace_enable(struct perf_event *event);
extern void perf_trace_disable(struct perf_event *event);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *
perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
unsigned long *irq_flags);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
struct pt_regs *regs, int *rctxp);
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
u64 count, unsigned long irq_flags, struct pt_regs *regs)
u64 count, struct pt_regs *regs, void *head)
{
struct trace_entry *entry = raw_data;
perf_tp_event(entry->type, addr, count, raw_data, size, regs);
perf_tp_event(addr, count, raw_data, size, regs, head);
perf_swevent_put_recursion_context(rctx);
local_irq_restore(irq_flags);
}
#endif
......
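In the header above, the per-call enabled and filter_active ints collapse into one flags word guarded by event_mutex; the filter code later in this series switches to exactly this pattern. A small sketch of the intended usage (the helper names are illustrative):

static int call_is_filtered(struct ftrace_event_call *call)
{
	return call->flags & TRACE_EVENT_FL_FILTERED;
}

static void call_set_filtered(struct ftrace_event_call *call, bool on)
{
	/* Writers hold event_mutex, per the comment in the struct above. */
	if (on)
		call->flags |= TRACE_EVENT_FL_FILTERED;
	else
		call->flags &= ~TRACE_EVENT_FL_FILTERED;
}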
......@@ -485,6 +485,7 @@ struct perf_guest_info_callbacks {
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <asm/atomic.h>
#include <asm/local.h>
#define PERF_MAX_STACK_DEPTH 255
......@@ -587,21 +588,19 @@ struct perf_mmap_data {
struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
struct work_struct work;
int page_order; /* allocation order */
#endif
int data_order;
int nr_pages; /* nr of data pages */
int writable; /* are we writable */
int nr_locked; /* nr pages mlocked */
atomic_t poll; /* POLL_ for wakeups */
atomic_t events; /* event_id limit */
atomic_long_t head; /* write position */
atomic_long_t done_head; /* completed head */
atomic_t lock; /* concurrent writes */
atomic_t wakeup; /* needs a wakeup */
atomic_t lost; /* nr records lost */
local_t head; /* write position */
local_t nest; /* nested writers */
local_t events; /* event limit */
local_t wakeup; /* wakeup stamp */
local_t lost; /* nr records lost */
long watermark; /* wakeup watermark */
......@@ -728,6 +727,7 @@ struct perf_event {
perf_overflow_handler_t overflow_handler;
#ifdef CONFIG_EVENT_TRACING
struct ftrace_event_call *tp_event;
struct event_filter *filter;
#endif
......@@ -803,11 +803,12 @@ struct perf_cpu_context {
struct perf_output_handle {
struct perf_event *event;
struct perf_mmap_data *data;
unsigned long head;
unsigned long offset;
unsigned long wakeup;
unsigned long size;
void *addr;
int page;
int nmi;
int sample;
int locked;
};
#ifdef CONFIG_PERF_EVENTS
......@@ -993,8 +994,9 @@ static inline bool perf_paranoid_kernel(void)
}
extern void perf_event_init(void);
extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
int entry_size, struct pt_regs *regs);
extern void perf_tp_event(u64 addr, u64 count, void *record,
int entry_size, struct pt_regs *regs,
struct hlist_head *head);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
......
......@@ -103,22 +103,6 @@ struct perf_event_attr;
#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
#ifdef CONFIG_PERF_EVENTS
#define TRACE_SYS_ENTER_PERF_INIT(sname) \
.perf_event_enable = perf_sysenter_enable, \
.perf_event_disable = perf_sysenter_disable,
#define TRACE_SYS_EXIT_PERF_INIT(sname) \
.perf_event_enable = perf_sysexit_enable, \
.perf_event_disable = perf_sysexit_disable,
#else
#define TRACE_SYS_ENTER_PERF(sname)
#define TRACE_SYS_ENTER_PERF_INIT(sname)
#define TRACE_SYS_EXIT_PERF(sname)
#define TRACE_SYS_EXIT_PERF_INIT(sname)
#endif /* CONFIG_PERF_EVENTS */
#ifdef CONFIG_FTRACE_SYSCALLS
#define __SC_STR_ADECL1(t, a) #a
#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
......@@ -134,54 +118,43 @@ struct perf_event_attr;
#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
extern struct ftrace_event_class event_class_syscall_enter;
extern struct ftrace_event_class event_class_syscall_exit;
extern struct trace_event_functions enter_syscall_print_funcs;
extern struct trace_event_functions exit_syscall_print_funcs;
#define SYSCALL_TRACE_ENTER_EVENT(sname) \
static const struct syscall_metadata __syscall_meta_##sname; \
static struct syscall_metadata __syscall_meta_##sname; \
static struct ftrace_event_call \
__attribute__((__aligned__(4))) event_enter_##sname; \
static struct trace_event enter_syscall_print_##sname = { \
.trace = print_syscall_enter, \
}; \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) \
event_enter_##sname = { \
.name = "sys_enter"#sname, \
.system = "syscalls", \
.event = &enter_syscall_print_##sname, \
.raw_init = init_syscall_trace, \
.define_fields = syscall_enter_define_fields, \
.regfunc = reg_event_syscall_enter, \
.unregfunc = unreg_event_syscall_enter, \
.class = &event_class_syscall_enter, \
.event.funcs = &enter_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
TRACE_SYS_ENTER_PERF_INIT(sname) \
}
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
static const struct syscall_metadata __syscall_meta_##sname; \
static struct syscall_metadata __syscall_meta_##sname; \
static struct ftrace_event_call \
__attribute__((__aligned__(4))) event_exit_##sname; \
static struct trace_event exit_syscall_print_##sname = { \
.trace = print_syscall_exit, \
}; \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) \
event_exit_##sname = { \
.name = "sys_exit"#sname, \
.system = "syscalls", \
.event = &exit_syscall_print_##sname, \
.raw_init = init_syscall_trace, \
.define_fields = syscall_exit_define_fields, \
.regfunc = reg_event_syscall_exit, \
.unregfunc = unreg_event_syscall_exit, \
.class = &event_class_syscall_exit, \
.event.funcs = &exit_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
TRACE_SYS_EXIT_PERF_INIT(sname) \
}
#define SYSCALL_METADATA(sname, nb) \
SYSCALL_TRACE_ENTER_EVENT(sname); \
SYSCALL_TRACE_EXIT_EVENT(sname); \
static const struct syscall_metadata __used \
static struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta_##sname = { \
......@@ -191,12 +164,14 @@ struct perf_event_attr;
.args = args_##sname, \
.enter_event = &event_enter_##sname, \
.exit_event = &event_exit_##sname, \
.enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
.exit_fields = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
};
#define SYSCALL_DEFINE0(sname) \
SYSCALL_TRACE_ENTER_EVENT(_##sname); \
SYSCALL_TRACE_EXIT_EVENT(_##sname); \
static const struct syscall_metadata __used \
static struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta__##sname = { \
......@@ -204,6 +179,8 @@ struct perf_event_attr;
.nb_args = 0, \
.enter_event = &event_enter__##sname, \
.exit_event = &event_exit__##sname, \
.enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
.exit_fields = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
}; \
asmlinkage long sys_##sname(void)
#else
......
......@@ -20,12 +20,17 @@
struct module;
struct tracepoint;
struct tracepoint_func {
void *func;
void *data;
};
struct tracepoint {
const char *name; /* Tracepoint name */
int state; /* State. */
void (*regfunc)(void);
void (*unregfunc)(void);
void **funcs;
struct tracepoint_func *funcs;
} __attribute__((aligned(32))); /*
* Aligned on 32 bytes because it is
* globally visible and gcc happily
......@@ -37,16 +42,19 @@ struct tracepoint {
* Connect a probe to a tracepoint.
* Internal API, should not be used directly.
*/
extern int tracepoint_probe_register(const char *name, void *probe);
extern int tracepoint_probe_register(const char *name, void *probe, void *data);
/*
* Disconnect a probe from a tracepoint.
* Internal API, should not be used directly.
*/
extern int tracepoint_probe_unregister(const char *name, void *probe);
extern int
tracepoint_probe_unregister(const char *name, void *probe, void *data);
extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
void *data);
extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
void *data);
extern void tracepoint_probe_update_all(void);
struct tracepoint_iter {
......@@ -102,17 +110,27 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
/*
* it_func[0] is never NULL because there is at least one element in the array
* when the array itself is non NULL.
*
* Note, the proto and args passed in includes "__data" as the first parameter.
* The reason for this is to handle the "void" prototype. If a tracepoint
* has a "void" prototype, then it is invalid to declare a function
* as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
* "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
*/
#define __DO_TRACE(tp, proto, args) \
do { \
void **it_func; \
struct tracepoint_func *it_func_ptr; \
void *it_func; \
void *__data; \
\
rcu_read_lock_sched_notrace(); \
it_func = rcu_dereference_sched((tp)->funcs); \
if (it_func) { \
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
if (it_func_ptr) { \
do { \
((void(*)(proto))(*it_func))(args); \
} while (*(++it_func)); \
it_func = (it_func_ptr)->func; \
__data = (it_func_ptr)->data; \
((void(*)(proto))(it_func))(args); \
} while ((++it_func_ptr)->func); \
} \
rcu_read_unlock_sched_notrace(); \
} while (0)
......@@ -122,24 +140,32 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*/
#define DECLARE_TRACE(name, proto, args) \
#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
if (unlikely(__tracepoint_##name.state)) \
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(proto), TP_ARGS(args)); \
TP_PROTO(data_proto), \
TP_ARGS(data_args)); \
} \
static inline int \
register_trace_##name(void (*probe)(data_proto), void *data) \
{ \
return tracepoint_probe_register(#name, (void *)probe, \
data); \
} \
static inline int register_trace_##name(void (*probe)(proto)) \
static inline int \
unregister_trace_##name(void (*probe)(data_proto), void *data) \
{ \
return tracepoint_probe_register(#name, (void *)probe); \
return tracepoint_probe_unregister(#name, (void *)probe, \
data); \
} \
static inline int unregister_trace_##name(void (*probe)(proto)) \
static inline void \
check_trace_callback_type_##name(void (*cb)(data_proto)) \
{ \
return tracepoint_probe_unregister(#name, (void *)probe);\
}
#define DEFINE_TRACE_FN(name, reg, unreg) \
static const char __tpstrtab_##name[] \
__attribute__((section("__tracepoints_strings"))) = #name; \
......@@ -156,18 +182,23 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
EXPORT_SYMBOL(__tracepoint_##name)
#else /* !CONFIG_TRACEPOINTS */
#define DECLARE_TRACE(name, proto, args) \
static inline void _do_trace_##name(struct tracepoint *tp, proto) \
{ } \
#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \
static inline void trace_##name(proto) \
{ } \
static inline int register_trace_##name(void (*probe)(proto)) \
static inline int \
register_trace_##name(void (*probe)(data_proto), \
void *data) \
{ \
return -ENOSYS; \
} \
static inline int unregister_trace_##name(void (*probe)(proto)) \
static inline int \
unregister_trace_##name(void (*probe)(data_proto), \
void *data) \
{ \
return -ENOSYS; \
} \
static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
{ \
}
#define DEFINE_TRACE_FN(name, reg, unreg)
......@@ -176,6 +207,29 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
#define EXPORT_TRACEPOINT_SYMBOL(name)
#endif /* CONFIG_TRACEPOINTS */
/*
* The need for the DECLARE_TRACE_NOARGS() is to handle the prototype
* (void). "void" is a special value in a function prototype and can
* not be combined with other arguments. Since the DECLARE_TRACE()
* macro adds a data element at the beginning of the prototype,
* we need a way to differentiate "(void *data, proto)" from
* "(void *data, void)". The second prototype is invalid.
*
* DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
* and "void *__data" as the callback prototype.
*
 * DECLARE_TRACE() passes "proto" as the tracepoint prototype and
* "void *__data, proto" as the callback prototype.
*/
#define DECLARE_TRACE_NOARGS(name) \
__DECLARE_TRACE(name, void, , void *__data, __data)
#define DECLARE_TRACE(name, proto, args) \
__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
PARAMS(void *__data, proto), \
PARAMS(__data, args))
#endif /* DECLARE_TRACE */
#ifndef TRACE_EVENT
......
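The tracepoint API above now threads a per-registration data pointer through to the probe, which receives it as its first argument; register/unregister take the (probe, data) pair, so the same probe can be attached several times with different private state. A self-contained sketch against the sched_switch tracepoint (the counter and module boilerplate are illustrative):

#include <linux/module.h>
#include <asm/atomic.h>
#include <trace/events/sched.h>

static atomic_t switch_count = ATOMIC_INIT(0);

/* __data arrives first, then the tracepoint's own arguments. */
static void probe_sched_switch_sketch(void *data,
				      struct task_struct *prev,
				      struct task_struct *next)
{
	atomic_inc((atomic_t *)data);
}

static int __init sketch_init(void)
{
	return register_trace_sched_switch(probe_sched_switch_sketch,
					   &switch_count);
}

static void __exit sketch_exit(void)
{
	unregister_trace_sched_switch(probe_sched_switch_sketch,
				      &switch_count);
	tracepoint_synchronize_unregister();	/* wait for in-flight probes */
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");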
......@@ -25,6 +25,8 @@ struct syscall_metadata {
int nb_args;
const char **types;
const char **args;
struct list_head enter_fields;
struct list_head exit_fields;
struct ftrace_event_call *enter_event;
struct ftrace_event_call *exit_event;
......@@ -34,16 +36,16 @@ struct syscall_metadata {
extern unsigned long arch_syscall_addr(int nr);
extern int init_syscall_trace(struct ftrace_event_call *call);
extern int syscall_enter_define_fields(struct ftrace_event_call *call);
extern int syscall_exit_define_fields(struct ftrace_event_call *call);
extern int reg_event_syscall_enter(struct ftrace_event_call *call);
extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
extern int reg_event_syscall_exit(struct ftrace_event_call *call);
extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
extern int
ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
struct trace_event *event);
enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
struct trace_event *event);
#endif
#ifdef CONFIG_PERF_EVENTS
......
......@@ -3234,7 +3234,8 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
}
static void
ftrace_graph_probe_sched_switch(struct task_struct *prev, struct task_struct *next)
ftrace_graph_probe_sched_switch(void *ignore,
struct task_struct *prev, struct task_struct *next)
{
unsigned long long timestamp;
int index;
......@@ -3288,7 +3289,7 @@ static int start_graph_tracing(void)
} while (ret == -EAGAIN);
if (!ret) {
ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
if (ret)
pr_info("ftrace_graph: Couldn't activate tracepoint"
" probe to kernel_sched_switch\n");
......@@ -3364,7 +3365,7 @@ void unregister_ftrace_graph(void)
ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(FTRACE_STOP_FUNC_RET);
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
out:
mutex_unlock(&ftrace_lock);
......
......@@ -95,7 +95,8 @@ static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
trace_wake_up();
}
static void kmemtrace_kmalloc(unsigned long call_site,
static void kmemtrace_kmalloc(void *ignore,
unsigned long call_site,
const void *ptr,
size_t bytes_req,
size_t bytes_alloc,
......@@ -105,7 +106,8 @@ static void kmemtrace_kmalloc(unsigned long call_site,
bytes_req, bytes_alloc, gfp_flags, -1);
}
static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
static void kmemtrace_kmem_cache_alloc(void *ignore,
unsigned long call_site,
const void *ptr,
size_t bytes_req,
size_t bytes_alloc,
......@@ -115,7 +117,8 @@ static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
bytes_req, bytes_alloc, gfp_flags, -1);
}
static void kmemtrace_kmalloc_node(unsigned long call_site,
static void kmemtrace_kmalloc_node(void *ignore,
unsigned long call_site,
const void *ptr,
size_t bytes_req,
size_t bytes_alloc,
......@@ -126,7 +129,8 @@ static void kmemtrace_kmalloc_node(unsigned long call_site,
bytes_req, bytes_alloc, gfp_flags, node);
}
static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
static void kmemtrace_kmem_cache_alloc_node(void *ignore,
unsigned long call_site,
const void *ptr,
size_t bytes_req,
size_t bytes_alloc,
......@@ -137,12 +141,14 @@ static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
bytes_req, bytes_alloc, gfp_flags, node);
}
static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
static void
kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr)
{
kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}
static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
static void kmemtrace_kmem_cache_free(void *ignore,
unsigned long call_site, const void *ptr)
{
kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
......@@ -151,34 +157,34 @@ static int kmemtrace_start_probes(void)
{
int err;
err = register_trace_kmalloc(kmemtrace_kmalloc);
err = register_trace_kmalloc(kmemtrace_kmalloc, NULL);
if (err)
return err;
err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
if (err)
return err;
err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
if (err)
return err;
err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
if (err)
return err;
err = register_trace_kfree(kmemtrace_kfree);
err = register_trace_kfree(kmemtrace_kfree, NULL);
if (err)
return err;
err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
return err;
}
static void kmemtrace_stop_probes(void)
{
unregister_trace_kmalloc(kmemtrace_kmalloc);
unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
unregister_trace_kfree(kmemtrace_kfree);
unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
unregister_trace_kmalloc(kmemtrace_kmalloc, NULL);
unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
unregister_trace_kfree(kmemtrace_kfree, NULL);
unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
}
static int kmem_trace_init(struct trace_array *tr)
......@@ -237,7 +243,8 @@ struct kmemtrace_user_event_alloc {
};
static enum print_line_t
kmemtrace_print_alloc(struct trace_iterator *iter, int flags)
kmemtrace_print_alloc(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
struct trace_seq *s = &iter->seq;
struct kmemtrace_alloc_entry *entry;
......@@ -257,7 +264,8 @@ kmemtrace_print_alloc(struct trace_iterator *iter, int flags)
}
static enum print_line_t
kmemtrace_print_free(struct trace_iterator *iter, int flags)
kmemtrace_print_free(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
struct trace_seq *s = &iter->seq;
struct kmemtrace_free_entry *entry;
......@@ -275,7 +283,8 @@ kmemtrace_print_free(struct trace_iterator *iter, int flags)
}
static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
struct trace_seq *s = &iter->seq;
struct kmemtrace_alloc_entry *entry;
......@@ -309,7 +318,8 @@ kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
}
static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter, int flags)
kmemtrace_print_free_user(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
struct trace_seq *s = &iter->seq;
struct kmemtrace_free_entry *entry;
......@@ -463,18 +473,26 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
}
}
static struct trace_event kmem_trace_alloc = {
.type = TRACE_KMEM_ALLOC,
static struct trace_event_functions kmem_trace_alloc_funcs = {
.trace = kmemtrace_print_alloc,
.binary = kmemtrace_print_alloc_user,
};
static struct trace_event kmem_trace_free = {
.type = TRACE_KMEM_FREE,
static struct trace_event kmem_trace_alloc = {
.type = TRACE_KMEM_ALLOC,
.funcs = &kmem_trace_alloc_funcs,
};
static struct trace_event_functions kmem_trace_free_funcs = {
.trace = kmemtrace_print_free,
.binary = kmemtrace_print_free_user,
};
static struct trace_event kmem_trace_free = {
.type = TRACE_KMEM_FREE,
.funcs = &kmem_trace_free_funcs,
};
static struct tracer kmem_tracer __read_mostly = {
.name = "kmemtrace",
.init = kmem_trace_init,
......
......@@ -1936,7 +1936,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
}
if (event)
return event->trace(iter, sym_flags);
return event->funcs->trace(iter, sym_flags, event);
if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
goto partial;
......@@ -1962,7 +1962,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
event = ftrace_find_event(entry->type);
if (event)
return event->raw(iter, 0);
return event->funcs->raw(iter, 0, event);
if (!trace_seq_printf(s, "%d ?\n", entry->type))
goto partial;
......@@ -1989,7 +1989,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
event = ftrace_find_event(entry->type);
if (event) {
enum print_line_t ret = event->hex(iter, 0);
enum print_line_t ret = event->funcs->hex(iter, 0, event);
if (ret != TRACE_TYPE_HANDLED)
return ret;
}
......@@ -2014,7 +2014,8 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
}
event = ftrace_find_event(entry->type);
return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
return event ? event->funcs->binary(iter, 0, event) :
TRACE_TYPE_HANDLED;
}
int trace_empty(struct trace_iterator *iter)
......
......@@ -405,12 +405,12 @@ void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc);
#else
static inline void ftrace_trace_stack(struct trace_array *tr,
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
unsigned long flags, int skip, int pc)
{
}
static inline void ftrace_trace_userstack(struct trace_array *tr,
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
unsigned long flags, int pc)
{
}
......@@ -778,12 +778,15 @@ extern void print_subsystem_event_filter(struct event_subsystem *system,
struct trace_seq *s);
extern int filter_assign_type(const char *type);
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call);
static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
if (unlikely(call->filter_active) &&
if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
!filter_match_preds(call->filter, rec)) {
ring_buffer_discard_commit(buffer, event);
return 1;
......
......@@ -143,7 +143,7 @@ static void branch_trace_reset(struct trace_array *tr)
}
static enum print_line_t trace_branch_print(struct trace_iterator *iter,
int flags)
int flags, struct trace_event *event)
{
struct trace_branch *field;
......@@ -167,9 +167,13 @@ static void branch_print_header(struct seq_file *s)
" |\n");
}
static struct trace_event_functions trace_branch_funcs = {
.trace = trace_branch_print,
};
static struct trace_event trace_branch_event = {
.type = TRACE_BRANCH,
.trace = trace_branch_print,
.funcs = &trace_branch_funcs,
};
static struct tracer branch_trace __read_mostly =
......
......@@ -9,13 +9,9 @@
#include <linux/kprobes.h>
#include "trace.h"
DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;
static char *perf_trace_buf[4];
/*
* Force it to be aligned to unsigned long to avoid misaligned accesses
......@@ -27,57 +23,82 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
static int perf_trace_event_enable(struct ftrace_event_call *event)
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
struct perf_event *p_event)
{
char *buf;
struct hlist_head *list;
int ret = -ENOMEM;
int cpu;
if (event->perf_refcount++ > 0)
p_event->tp_event = tp_event;
if (tp_event->perf_refcount++ > 0)
return 0;
if (!total_ref_count) {
buf = (char *)alloc_percpu(perf_trace_t);
if (!buf)
goto fail_buf;
list = alloc_percpu(struct hlist_head);
if (!list)
goto fail;
rcu_assign_pointer(perf_trace_buf, buf);
for_each_possible_cpu(cpu)
INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));
buf = (char *)alloc_percpu(perf_trace_t);
if (!buf)
goto fail_buf_nmi;
tp_event->perf_events = list;
rcu_assign_pointer(perf_trace_buf_nmi, buf);
}
if (!total_ref_count) {
char *buf;
int i;
ret = event->perf_event_enable(event);
if (!ret) {
total_ref_count++;
return 0;
for (i = 0; i < 4; i++) {
buf = (char *)alloc_percpu(perf_trace_t);
if (!buf)
goto fail;
perf_trace_buf[i] = buf;
}
}
fail_buf_nmi:
if (tp_event->class->reg)
ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
else
ret = tracepoint_probe_register(tp_event->name,
tp_event->class->perf_probe,
tp_event);
if (ret)
goto fail;
total_ref_count++;
return 0;
fail:
if (!total_ref_count) {
free_percpu(perf_trace_buf_nmi);
free_percpu(perf_trace_buf);
perf_trace_buf_nmi = NULL;
perf_trace_buf = NULL;
int i;
for (i = 0; i < 4; i++) {
free_percpu(perf_trace_buf[i]);
perf_trace_buf[i] = NULL;
}
}
if (!--tp_event->perf_refcount) {
free_percpu(tp_event->perf_events);
tp_event->perf_events = NULL;
}
fail_buf:
event->perf_refcount--;
return ret;
}
int perf_trace_enable(int event_id)
int perf_trace_init(struct perf_event *p_event)
{
struct ftrace_event_call *event;
struct ftrace_event_call *tp_event;
int event_id = p_event->attr.config;
int ret = -EINVAL;
mutex_lock(&event_mutex);
list_for_each_entry(event, &ftrace_events, list) {
if (event->id == event_id && event->perf_event_enable &&
try_module_get(event->mod)) {
ret = perf_trace_event_enable(event);
list_for_each_entry(tp_event, &ftrace_events, list) {
if (tp_event->event.type == event_id &&
tp_event->class && tp_event->class->perf_probe &&
try_module_get(tp_event->mod)) {
ret = perf_trace_event_init(tp_event, p_event);
break;
}
}
......@@ -86,90 +107,78 @@ int perf_trace_enable(int event_id)
return ret;
}
static void perf_trace_event_disable(struct ftrace_event_call *event)
int perf_trace_enable(struct perf_event *p_event)
{
char *buf, *nmi_buf;
if (--event->perf_refcount > 0)
return;
event->perf_event_disable(event);
struct ftrace_event_call *tp_event = p_event->tp_event;
struct hlist_head *list;
if (!--total_ref_count) {
buf = perf_trace_buf;
rcu_assign_pointer(perf_trace_buf, NULL);
list = tp_event->perf_events;
if (WARN_ON_ONCE(!list))
return -EINVAL;
nmi_buf = perf_trace_buf_nmi;
rcu_assign_pointer(perf_trace_buf_nmi, NULL);
list = per_cpu_ptr(list, smp_processor_id());
hlist_add_head_rcu(&p_event->hlist_entry, list);
/*
* Ensure every events in profiling have finished before
* releasing the buffers
*/
synchronize_sched();
return 0;
}
free_percpu(buf);
free_percpu(nmi_buf);
}
void perf_trace_disable(struct perf_event *p_event)
{
hlist_del_rcu(&p_event->hlist_entry);
}
void perf_trace_disable(int event_id)
void perf_trace_destroy(struct perf_event *p_event)
{
struct ftrace_event_call *event;
struct ftrace_event_call *tp_event = p_event->tp_event;
int i;
mutex_lock(&event_mutex);
list_for_each_entry(event, &ftrace_events, list) {
if (event->id == event_id) {
perf_trace_event_disable(event);
module_put(event->mod);
break;
if (--tp_event->perf_refcount > 0)
return;
if (tp_event->class->reg)
tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
else
tracepoint_probe_unregister(tp_event->name,
tp_event->class->perf_probe,
tp_event);
free_percpu(tp_event->perf_events);
tp_event->perf_events = NULL;
if (!--total_ref_count) {
for (i = 0; i < 4; i++) {
free_percpu(perf_trace_buf[i]);
perf_trace_buf[i] = NULL;
}
}
mutex_unlock(&event_mutex);
}
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
int *rctxp, unsigned long *irq_flags)
struct pt_regs *regs, int *rctxp)
{
struct trace_entry *entry;
char *trace_buf, *raw_data;
int pc, cpu;
unsigned long flags;
char *raw_data;
int pc;
BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
pc = preempt_count();
/* Protect the per cpu buffer, begin the rcu read side */
local_irq_save(*irq_flags);
*rctxp = perf_swevent_get_recursion_context();
if (*rctxp < 0)
goto err_recursion;
cpu = smp_processor_id();
if (in_nmi())
trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
else
trace_buf = rcu_dereference_sched(perf_trace_buf);
if (!trace_buf)
goto err;
return NULL;
raw_data = per_cpu_ptr(trace_buf, cpu);
raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id());
/* zero the dead bytes from align to not leak stack to user */
memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
entry = (struct trace_entry *)raw_data;
tracing_generic_entry_update(entry, *irq_flags, pc);
local_save_flags(flags);
tracing_generic_entry_update(entry, flags, pc);
entry->type = type;
return raw_data;
err:
perf_swevent_put_recursion_context(*rctxp);
err_recursion:
local_irq_restore(*irq_flags);
return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
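With the rewrite above, perf no longer keeps separate normal/NMI buffers: there is one scratch buffer per recursion context (hence the array of four), and the events interested in a tracepoint hang off a per-tracepoint per-cpu hlist. A probe prepares a buffer, fills it, and submits straight to that hlist, as the kprobe handler further down in this series does; the sketch below follows the same shape with an illustrative entry layout:

static void perf_probe_sketch(struct ftrace_event_call *call,
			      struct pt_regs *regs, u64 value)
{
	struct {
		struct trace_entry	ent;
		u64			value;
	} *entry;
	struct hlist_head *head;
	int size, rctx;

	size = ALIGN(sizeof(*entry), sizeof(u64));

	/* rctx selects one of the four per-context buffers. */
	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->value = value;

	/* Consumers for this tracepoint live on a per-cpu hlist. */
	head = per_cpu_ptr(call->perf_events, smp_processor_id());
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head);
}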
......@@ -500,8 +500,10 @@ static struct ftrace_event_field *
find_event_field(struct ftrace_event_call *call, char *name)
{
struct ftrace_event_field *field;
struct list_head *head;
list_for_each_entry(field, &call->fields, link) {
head = trace_get_fields(call);
list_for_each_entry(field, head, link) {
if (!strcmp(field->name, name))
return field;
}
......@@ -545,7 +547,7 @@ static void filter_disable_preds(struct ftrace_event_call *call)
struct event_filter *filter = call->filter;
int i;
call->filter_active = 0;
call->flags &= ~TRACE_EVENT_FL_FILTERED;
filter->n_preds = 0;
for (i = 0; i < MAX_FILTER_PRED; i++)
......@@ -572,7 +574,7 @@ void destroy_preds(struct ftrace_event_call *call)
{
__free_preds(call->filter);
call->filter = NULL;
call->filter_active = 0;
call->flags &= ~TRACE_EVENT_FL_FILTERED;
}
static struct event_filter *__alloc_preds(void)
......@@ -611,7 +613,7 @@ static int init_preds(struct ftrace_event_call *call)
if (call->filter)
return 0;
call->filter_active = 0;
call->flags &= ~TRACE_EVENT_FL_FILTERED;
call->filter = __alloc_preds();
if (IS_ERR(call->filter))
return PTR_ERR(call->filter);
......@@ -625,10 +627,10 @@ static int init_subsystem_preds(struct event_subsystem *system)
int err;
list_for_each_entry(call, &ftrace_events, list) {
if (!call->define_fields)
if (!call->class || !call->class->define_fields)
continue;
if (strcmp(call->system, system->name) != 0)
if (strcmp(call->class->system, system->name) != 0)
continue;
err = init_preds(call);
......@@ -644,10 +646,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system)
struct ftrace_event_call *call;
list_for_each_entry(call, &ftrace_events, list) {
if (!call->define_fields)
if (!call->class || !call->class->define_fields)
continue;
if (strcmp(call->system, system->name) != 0)
if (strcmp(call->class->system, system->name) != 0)
continue;
filter_disable_preds(call);
......@@ -1249,10 +1251,10 @@ static int replace_system_preds(struct event_subsystem *system,
list_for_each_entry(call, &ftrace_events, list) {
struct event_filter *filter = call->filter;
if (!call->define_fields)
if (!call->class || !call->class->define_fields)
continue;
if (strcmp(call->system, system->name) != 0)
if (strcmp(call->class->system, system->name) != 0)
continue;
/* try to see if the filter can be applied */
......@@ -1266,7 +1268,7 @@ static int replace_system_preds(struct event_subsystem *system,
if (err)
filter_disable_preds(call);
else {
call->filter_active = 1;
call->flags |= TRACE_EVENT_FL_FILTERED;
replace_filter_string(filter, filter_string);
}
fail = false;
......@@ -1315,7 +1317,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
if (err)
append_filter_err(ps, call->filter);
else
call->filter_active = 1;
call->flags |= TRACE_EVENT_FL_FILTERED;
out:
filter_opstack_clear(ps);
postfix_clear(ps);
......@@ -1393,7 +1395,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
mutex_lock(&event_mutex);
list_for_each_entry(call, &ftrace_events, list) {
if (call->id == event_id)
if (call->event.type == event_id)
break;
}
......
......@@ -127,7 +127,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
static int ftrace_raw_init_event(struct ftrace_event_call *call)
{
INIT_LIST_HEAD(&call->fields);
INIT_LIST_HEAD(&call->class->fields);
return 0;
}
......@@ -153,17 +153,21 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
#define F_printk(fmt, args...) #fmt ", " __stringify(args)
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \
#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \
\
struct ftrace_event_class event_class_ftrace_##call = { \
.system = __stringify(TRACE_SYSTEM), \
.define_fields = ftrace_define_fields_##call, \
.raw_init = ftrace_raw_init_event, \
}; \
\
struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.id = type, \
.system = __stringify(TRACE_SYSTEM), \
.raw_init = ftrace_raw_init_event, \
.event.type = etype, \
.class = &event_class_ftrace_##call, \
.print_fmt = print, \
.define_fields = ftrace_define_fields_##call, \
}; \
#include "trace_entries.h"
......@@ -1025,7 +1025,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
if (!event)
return TRACE_TYPE_UNHANDLED;
ret = event->trace(iter, sym_flags);
ret = event->funcs->trace(iter, sym_flags, event);
if (ret != TRACE_TYPE_HANDLED)
return ret;
}
......@@ -1112,7 +1112,8 @@ print_graph_function(struct trace_iterator *iter)
}
static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags)
print_graph_function_event(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
return print_graph_function(iter);
}
......@@ -1225,14 +1226,18 @@ void graph_trace_close(struct trace_iterator *iter)
}
}
static struct trace_event_functions graph_functions = {
.trace = print_graph_function_event,
};
static struct trace_event graph_trace_entry_event = {
.type = TRACE_GRAPH_ENT,
.trace = print_graph_function_event,
.funcs = &graph_functions,
};
static struct trace_event graph_trace_ret_event = {
.type = TRACE_GRAPH_RET,
.trace = print_graph_function_event,
.funcs = &graph_functions
};
static struct tracer graph_trace __read_mostly = {
......
......@@ -324,8 +324,8 @@ struct trace_probe {
unsigned long nhit;
unsigned int flags; /* For TP_FLAG_* */
const char *symbol; /* symbol name */
struct ftrace_event_class class;
struct ftrace_event_call call;
struct trace_event event;
ssize_t size; /* trace entry size */
unsigned int nr_args;
struct probe_arg args[];
......@@ -404,6 +404,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
goto error;
}
tp->call.class = &tp->class;
tp->call.name = kstrdup(event, GFP_KERNEL);
if (!tp->call.name)
goto error;
......@@ -413,8 +414,8 @@ static struct trace_probe *alloc_trace_probe(const char *group,
goto error;
}
tp->call.system = kstrdup(group, GFP_KERNEL);
if (!tp->call.system)
tp->class.system = kstrdup(group, GFP_KERNEL);
if (!tp->class.system)
goto error;
INIT_LIST_HEAD(&tp->list);
......@@ -443,7 +444,7 @@ static void free_trace_probe(struct trace_probe *tp)
for (i = 0; i < tp->nr_args; i++)
free_probe_arg(&tp->args[i]);
kfree(tp->call.system);
kfree(tp->call.class->system);
kfree(tp->call.name);
kfree(tp->symbol);
kfree(tp);
......@@ -456,7 +457,7 @@ static struct trace_probe *find_probe_event(const char *event,
list_for_each_entry(tp, &probe_list, list)
if (strcmp(tp->call.name, event) == 0 &&
strcmp(tp->call.system, group) == 0)
strcmp(tp->call.class->system, group) == 0)
return tp;
return NULL;
}
......@@ -481,7 +482,7 @@ static int register_trace_probe(struct trace_probe *tp)
mutex_lock(&probe_lock);
/* register as an event */
old_tp = find_probe_event(tp->call.name, tp->call.system);
old_tp = find_probe_event(tp->call.name, tp->call.class->system);
if (old_tp) {
/* delete old event */
unregister_trace_probe(old_tp);
......@@ -904,7 +905,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
int i;
seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);
seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
if (!tp->symbol)
seq_printf(m, " 0x%p", tp->rp.kp.addr);
......@@ -1061,8 +1062,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
size = sizeof(*entry) + tp->size;
event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
irq_flags, pc);
event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
size, irq_flags, pc);
if (!event)
return;
......@@ -1094,8 +1095,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
size = sizeof(*entry) + tp->size;
event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
irq_flags, pc);
event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
size, irq_flags, pc);
if (!event)
return;
......@@ -1112,18 +1113,17 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
print_kprobe_event(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
struct kprobe_trace_entry_head *field;
struct trace_seq *s = &iter->seq;
struct trace_event *event;
struct trace_probe *tp;
u8 *data;
int i;
field = (struct kprobe_trace_entry_head *)iter->ent;
event = ftrace_find_event(field->ent.type);
tp = container_of(event, struct trace_probe, event);
tp = container_of(event, struct trace_probe, call.event);
if (!trace_seq_printf(s, "%s: (", tp->call.name))
goto partial;
......@@ -1149,18 +1149,17 @@ print_kprobe_event(struct trace_iterator *iter, int flags)
}
enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
print_kretprobe_event(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
struct kretprobe_trace_entry_head *field;
struct trace_seq *s = &iter->seq;
struct trace_event *event;
struct trace_probe *tp;
u8 *data;
int i;
field = (struct kretprobe_trace_entry_head *)iter->ent;
event = ftrace_find_event(field->ent.type);
tp = container_of(event, struct trace_probe, event);
tp = container_of(event, struct trace_probe, call.event);
if (!trace_seq_printf(s, "%s: (", tp->call.name))
goto partial;
......@@ -1217,8 +1216,6 @@ static void probe_event_disable(struct ftrace_event_call *call)
static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
INIT_LIST_HEAD(&event_call->fields);
return 0;
}
......@@ -1341,9 +1338,9 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
struct ftrace_event_call *call = &tp->call;
struct kprobe_trace_entry_head *entry;
struct hlist_head *head;
u8 *data;
int size, __size, i;
unsigned long irq_flags;
int rctx;
__size = sizeof(*entry) + tp->size;
......@@ -1353,7 +1350,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
"profile buffer not large enough"))
return;
entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
if (!entry)
return;
......@@ -1362,7 +1359,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
for (i = 0; i < tp->nr_args; i++)
call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
head = per_cpu_ptr(call->perf_events, smp_processor_id());
perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
}
/* Kretprobe profile handler */
......@@ -1372,9 +1370,9 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
struct ftrace_event_call *call = &tp->call;
struct kretprobe_trace_entry_head *entry;
struct hlist_head *head;
u8 *data;
int size, __size, i;
unsigned long irq_flags;
int rctx;
__size = sizeof(*entry) + tp->size;
......@@ -1384,7 +1382,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
"profile buffer not large enough"))
return;
entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
if (!entry)
return;
......@@ -1394,8 +1392,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
for (i = 0; i < tp->nr_args; i++)
call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
irq_flags, regs);
head = per_cpu_ptr(call->perf_events, smp_processor_id());
perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
}
static int probe_perf_enable(struct ftrace_event_call *call)
......@@ -1425,6 +1423,26 @@ static void probe_perf_disable(struct ftrace_event_call *call)
}
#endif /* CONFIG_PERF_EVENTS */
static __kprobes
int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
{
switch (type) {
case TRACE_REG_REGISTER:
return probe_event_enable(event);
case TRACE_REG_UNREGISTER:
probe_event_disable(event);
return 0;
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
return probe_perf_enable(event);
case TRACE_REG_PERF_UNREGISTER:
probe_perf_disable(event);
return 0;
#endif
}
return 0;
}
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
......@@ -1454,6 +1472,14 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
return 0; /* We don't tweek kernel, so just return 0 */
}
static struct trace_event_functions kretprobe_funcs = {
.trace = print_kretprobe_event
};
static struct trace_event_functions kprobe_funcs = {
.trace = print_kprobe_event
};
static int register_probe_event(struct trace_probe *tp)
{
struct ftrace_event_call *call = &tp->call;
......@@ -1461,36 +1487,31 @@ static int register_probe_event(struct trace_probe *tp)
/* Initialize ftrace_event_call */
if (probe_is_return(tp)) {
tp->event.trace = print_kretprobe_event;
call->raw_init = probe_event_raw_init;
call->define_fields = kretprobe_event_define_fields;
INIT_LIST_HEAD(&call->class->fields);
call->event.funcs = &kretprobe_funcs;
call->class->raw_init = probe_event_raw_init;
call->class->define_fields = kretprobe_event_define_fields;
} else {
tp->event.trace = print_kprobe_event;
call->raw_init = probe_event_raw_init;
call->define_fields = kprobe_event_define_fields;
INIT_LIST_HEAD(&call->class->fields);
call->event.funcs = &kprobe_funcs;
call->class->raw_init = probe_event_raw_init;
call->class->define_fields = kprobe_event_define_fields;
}
if (set_print_fmt(tp) < 0)
return -ENOMEM;
call->event = &tp->event;
call->id = register_ftrace_event(&tp->event);
if (!call->id) {
ret = register_ftrace_event(&call->event);
if (!ret) {
kfree(call->print_fmt);
return -ENODEV;
}
call->enabled = 0;
call->regfunc = probe_event_enable;
call->unregfunc = probe_event_disable;
#ifdef CONFIG_PERF_EVENTS
call->perf_event_enable = probe_perf_enable;
call->perf_event_disable = probe_perf_disable;
#endif
call->flags = 0;
call->class->reg = kprobe_register;
call->data = tp;
ret = trace_add_event_call(call);
if (ret) {
pr_info("Failed to register kprobe event: %s\n", call->name);
kfree(call->print_fmt);
unregister_ftrace_event(&tp->event);
unregister_ftrace_event(&call->event);
}
return ret;
}
......
......@@ -25,7 +25,7 @@ extern void trace_event_read_unlock(void);
extern struct trace_event *ftrace_find_event(int type);
extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
int flags);
int flags, struct trace_event *event);
extern int
trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry);
......
......@@ -50,7 +50,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
}
static void
probe_sched_switch(struct task_struct *prev, struct task_struct *next)
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
struct trace_array_cpu *data;
unsigned long flags;
......@@ -108,7 +108,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
}
static void
probe_sched_wakeup(struct task_struct *wakee, int success)
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
struct trace_array_cpu *data;
unsigned long flags;
......@@ -138,21 +138,21 @@ static int tracing_sched_register(void)
{
int ret;
ret = register_trace_sched_wakeup(probe_sched_wakeup);
ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_wakeup\n");
return ret;
}
ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_wakeup_new\n");
goto fail_deprobe;
}
ret = register_trace_sched_switch(probe_sched_switch);
ret = register_trace_sched_switch(probe_sched_switch, NULL);
if (ret) {
pr_info("sched trace: Couldn't activate tracepoint"
" probe to kernel_sched_switch\n");
......@@ -161,17 +161,17 @@ static int tracing_sched_register(void)
return ret;
fail_deprobe_wake_new:
unregister_trace_sched_wakeup_new(probe_sched_wakeup);
unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe:
unregister_trace_sched_wakeup(probe_sched_wakeup);
unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
return ret;
}
static void tracing_sched_unregister(void)
{
unregister_trace_sched_switch(probe_sched_switch);
unregister_trace_sched_wakeup_new(probe_sched_wakeup);
unregister_trace_sched_wakeup(probe_sched_wakeup);
unregister_trace_sched_switch(probe_sched_switch, NULL);
unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
static void tracing_start_sched_switch(void)
......
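Every probe change in this and the following hunks follows the same convention: registration and unregistration now take a (probe, data) pair, and the registered data pointer is delivered to the probe as a new first argument (the in-tree callers here all pass NULL). A minimal sketch under that assumption, reusing the sched_wakeup tracepoint from the hunk above, with the trace_array use invented for illustration:

static void sketch_probe_wakeup(void *data, struct task_struct *wakee,
				int success)
{
	struct trace_array *tr = data;	/* per-registration state, no global */

	if (tr)
		trace_printk("wakeup of pid %d\n", wakee->pid);
}

static int sketch_register(struct trace_array *tr)
{
	return register_trace_sched_wakeup(sketch_probe_wakeup, tr);
}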
......@@ -98,7 +98,8 @@ static int report_latency(cycle_t delta)
return 1;
}
static void probe_wakeup_migrate_task(struct task_struct *task, int cpu)
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
if (task != wakeup_task)
return;
......@@ -107,7 +108,8 @@ static void probe_wakeup_migrate_task(struct task_struct *task, int cpu)
}
static void notrace
probe_wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
probe_wakeup_sched_switch(void *ignore,
struct task_struct *prev, struct task_struct *next)
{
struct trace_array_cpu *data;
cycle_t T0, T1, delta;
......@@ -199,7 +201,7 @@ static void wakeup_reset(struct trace_array *tr)
}
static void
probe_wakeup(struct task_struct *p, int success)
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
struct trace_array_cpu *data;
int cpu = smp_processor_id();
......@@ -263,28 +265,28 @@ static void start_wakeup_tracer(struct trace_array *tr)
{
int ret;
ret = register_trace_sched_wakeup(probe_wakeup);
ret = register_trace_sched_wakeup(probe_wakeup, NULL);
if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_wakeup\n");
return;
}
ret = register_trace_sched_wakeup_new(probe_wakeup);
ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_wakeup_new\n");
goto fail_deprobe;
}
ret = register_trace_sched_switch(probe_wakeup_sched_switch);
ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
if (ret) {
pr_info("sched trace: Couldn't activate tracepoint"
" probe to kernel_sched_switch\n");
goto fail_deprobe_wake_new;
}
ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task);
ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_migrate_task\n");
......@@ -311,19 +313,19 @@ static void start_wakeup_tracer(struct trace_array *tr)
return;
fail_deprobe_wake_new:
unregister_trace_sched_wakeup_new(probe_wakeup);
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
unregister_trace_sched_wakeup(probe_wakeup);
unregister_trace_sched_wakeup(probe_wakeup, NULL);
}
static void stop_wakeup_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
unregister_ftrace_function(&trace_ops);
unregister_trace_sched_switch(probe_wakeup_sched_switch);
unregister_trace_sched_wakeup_new(probe_wakeup);
unregister_trace_sched_wakeup(probe_wakeup);
unregister_trace_sched_migrate_task(probe_wakeup_migrate_task);
unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
unregister_trace_sched_wakeup(probe_wakeup, NULL);
unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}
static int __wakeup_tracer_init(struct trace_array *tr)
......
......@@ -49,7 +49,8 @@ static void cpu_workqueue_stat_free(struct kref *kref)
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
probe_workqueue_insertion(void *ignore,
struct task_struct *wq_thread,
struct work_struct *work)
{
int cpu = cpumask_first(&wq_thread->cpus_allowed);
......@@ -70,7 +71,8 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
probe_workqueue_execution(void *ignore,
struct task_struct *wq_thread,
struct work_struct *work)
{
int cpu = cpumask_first(&wq_thread->cpus_allowed);
......@@ -90,7 +92,8 @@ probe_workqueue_execution(struct task_struct *wq_thread,
}
/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
static void probe_workqueue_creation(void *ignore,
struct task_struct *wq_thread, int cpu)
{
struct cpu_workqueue_stats *cws;
unsigned long flags;
......@@ -114,7 +117,8 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
}
/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
static void
probe_workqueue_destruction(void *ignore, struct task_struct *wq_thread)
{
/* Workqueue only execute on one cpu */
int cpu = cpumask_first(&wq_thread->cpus_allowed);
......@@ -259,19 +263,19 @@ int __init trace_workqueue_early_init(void)
{
int ret, cpu;
ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
if (ret)
goto out;
ret = register_trace_workqueue_execution(probe_workqueue_execution);
ret = register_trace_workqueue_execution(probe_workqueue_execution, NULL);
if (ret)
goto no_insertion;
ret = register_trace_workqueue_creation(probe_workqueue_creation);
ret = register_trace_workqueue_creation(probe_workqueue_creation, NULL);
if (ret)
goto no_execution;
ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
ret = register_trace_workqueue_destruction(probe_workqueue_destruction, NULL);
if (ret)
goto no_creation;
......@@ -283,11 +287,11 @@ int __init trace_workqueue_early_init(void)
return 0;
no_creation:
unregister_trace_workqueue_creation(probe_workqueue_creation);
unregister_trace_workqueue_creation(probe_workqueue_creation, NULL);
no_execution:
unregister_trace_workqueue_execution(probe_workqueue_execution);
unregister_trace_workqueue_execution(probe_workqueue_execution, NULL);
no_insertion:
unregister_trace_workqueue_insertion(probe_workqueue_insertion);
unregister_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
out:
pr_warning("trace_workqueue: unable to trace workqueues\n");
......
......@@ -54,7 +54,7 @@ static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
*/
struct tracepoint_entry {
struct hlist_node hlist;
void **funcs;
struct tracepoint_func *funcs;
int refcount; /* Number of times armed. 0 if disarmed. */
char name[0];
};
......@@ -64,12 +64,12 @@ struct tp_probes {
struct rcu_head rcu;
struct list_head list;
} u;
void *probes[0];
struct tracepoint_func probes[0];
};
static inline void *allocate_probes(int count)
{
struct tp_probes *p = kmalloc(count * sizeof(void *)
struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
+ sizeof(struct tp_probes), GFP_KERNEL);
return p == NULL ? NULL : p->probes;
}
......@@ -79,7 +79,7 @@ static void rcu_free_old_probes(struct rcu_head *head)
kfree(container_of(head, struct tp_probes, u.rcu));
}
static inline void release_probes(void *old)
static inline void release_probes(struct tracepoint_func *old)
{
if (old) {
struct tp_probes *tp_probes = container_of(old,
......@@ -95,15 +95,16 @@ static void debug_print_probes(struct tracepoint_entry *entry)
if (!tracepoint_debug || !entry->funcs)
return;
for (i = 0; entry->funcs[i]; i++)
printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]);
for (i = 0; entry->funcs[i].func; i++)
printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
}
static void *
tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
static struct tracepoint_func *
tracepoint_entry_add_probe(struct tracepoint_entry *entry,
void *probe, void *data)
{
int nr_probes = 0;
void **old, **new;
struct tracepoint_func *old, *new;
WARN_ON(!probe);
......@@ -111,8 +112,9 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
old = entry->funcs;
if (old) {
/* (N -> N+1), (N != 0, 1) probes */
for (nr_probes = 0; old[nr_probes]; nr_probes++)
if (old[nr_probes] == probe)
for (nr_probes = 0; old[nr_probes].func; nr_probes++)
if (old[nr_probes].func == probe &&
old[nr_probes].data == data)
return ERR_PTR(-EEXIST);
}
/* + 2 : one for new probe, one for NULL func */
......@@ -120,9 +122,10 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
if (new == NULL)
return ERR_PTR(-ENOMEM);
if (old)
memcpy(new, old, nr_probes * sizeof(void *));
new[nr_probes] = probe;
new[nr_probes + 1] = NULL;
memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
new[nr_probes].func = probe;
new[nr_probes].data = data;
new[nr_probes + 1].func = NULL;
entry->refcount = nr_probes + 1;
entry->funcs = new;
debug_print_probes(entry);
......@@ -130,10 +133,11 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
}
static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
void *probe, void *data)
{
int nr_probes = 0, nr_del = 0, i;
void **old, **new;
struct tracepoint_func *old, *new;
old = entry->funcs;
......@@ -142,8 +146,10 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
debug_print_probes(entry);
/* (N -> M), (N > 1, M >= 0) probes */
for (nr_probes = 0; old[nr_probes]; nr_probes++) {
if ((!probe || old[nr_probes] == probe))
for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
if (!probe ||
(old[nr_probes].func == probe &&
old[nr_probes].data == data))
nr_del++;
}
......@@ -160,10 +166,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
new = allocate_probes(nr_probes - nr_del + 1);
if (new == NULL)
return ERR_PTR(-ENOMEM);
for (i = 0; old[i]; i++)
if ((probe && old[i] != probe))
for (i = 0; old[i].func; i++)
if (probe &&
(old[i].func != probe || old[i].data != data))
new[j++] = old[i];
new[nr_probes - nr_del] = NULL;
new[nr_probes - nr_del].func = NULL;
entry->refcount = nr_probes - nr_del;
entry->funcs = new;
}
......@@ -315,18 +322,19 @@ static void tracepoint_update_probes(void)
module_update_tracepoints();
}
static void *tracepoint_add_probe(const char *name, void *probe)
static struct tracepoint_func *
tracepoint_add_probe(const char *name, void *probe, void *data)
{
struct tracepoint_entry *entry;
void *old;
struct tracepoint_func *old;
entry = get_tracepoint(name);
if (!entry) {
entry = add_tracepoint(name);
if (IS_ERR(entry))
return entry;
return (struct tracepoint_func *)entry;
}
old = tracepoint_entry_add_probe(entry, probe);
old = tracepoint_entry_add_probe(entry, probe, data);
if (IS_ERR(old) && !entry->refcount)
remove_tracepoint(entry);
return old;
......@@ -340,12 +348,12 @@ static void *tracepoint_add_probe(const char *name, void *probe)
* Returns 0 if ok, error value on error.
* The probe address must at least be aligned on the architecture pointer size.
*/
int tracepoint_probe_register(const char *name, void *probe)
int tracepoint_probe_register(const char *name, void *probe, void *data)
{
void *old;
struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex);
old = tracepoint_add_probe(name, probe);
old = tracepoint_add_probe(name, probe, data);
mutex_unlock(&tracepoints_mutex);
if (IS_ERR(old))
return PTR_ERR(old);
......@@ -356,15 +364,16 @@ int tracepoint_probe_register(const char *name, void *probe)
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
static void *tracepoint_remove_probe(const char *name, void *probe)
static struct tracepoint_func *
tracepoint_remove_probe(const char *name, void *probe, void *data)
{
struct tracepoint_entry *entry;
void *old;
struct tracepoint_func *old;
entry = get_tracepoint(name);
if (!entry)
return ERR_PTR(-ENOENT);
old = tracepoint_entry_remove_probe(entry, probe);
old = tracepoint_entry_remove_probe(entry, probe, data);
if (IS_ERR(old))
return old;
if (!entry->refcount)
......@@ -382,12 +391,12 @@ static void *tracepoint_remove_probe(const char *name, void *probe)
itself uses stop_machine(), which ensures that every preempt-disabled section
has finished.
*/
int tracepoint_probe_unregister(const char *name, void *probe)
int tracepoint_probe_unregister(const char *name, void *probe, void *data)
{
void *old;
struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex);
old = tracepoint_remove_probe(name, probe);
old = tracepoint_remove_probe(name, probe, data);
mutex_unlock(&tracepoints_mutex);
if (IS_ERR(old))
return PTR_ERR(old);
......@@ -418,12 +427,13 @@ static void tracepoint_add_old_probes(void *old)
*
* caller must call tracepoint_probe_update_all()
*/
int tracepoint_probe_register_noupdate(const char *name, void *probe)
int tracepoint_probe_register_noupdate(const char *name, void *probe,
void *data)
{
void *old;
struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex);
old = tracepoint_add_probe(name, probe);
old = tracepoint_add_probe(name, probe, data);
if (IS_ERR(old)) {
mutex_unlock(&tracepoints_mutex);
return PTR_ERR(old);
......@@ -441,12 +451,13 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
*
* caller must call tracepoint_probe_update_all()
*/
int tracepoint_probe_unregister_noupdate(const char *name, void *probe)
int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
void *data)
{
void *old;
struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex);
old = tracepoint_remove_probe(name, probe);
old = tracepoint_remove_probe(name, probe, data);
if (IS_ERR(old)) {
mutex_unlock(&tracepoints_mutex);
return PTR_ERR(old);
......
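The core change above turns the per-tracepoint probe list into an array of struct tracepoint_func terminated by a NULL func, so the callsite can hand each probe its own data pointer. A simplified, non-authoritative sketch of the dispatch loop that consumes this array (the real one is generated by the __DO_TRACE() macro in include/linux/tracepoint.h and is type-safe per tracepoint; the argument list below is only an example):

static void sketch_dispatch(struct tracepoint_func *funcs,
			    struct task_struct *p, int success)
{
	struct tracepoint_func *it = funcs;

	if (!it)
		return;
	do {
		((void (*)(void *, struct task_struct *, int))it->func)
			(it->data, p, success);
	} while ((++it)->func);
}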
......@@ -172,12 +172,12 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
return;
}
static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
{
trace_drop_common(skb, location);
}
static void trace_napi_poll_hit(struct napi_struct *napi)
static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
{
struct dm_hw_stat_delta *new_stat;
......@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state)
switch (state) {
case TRACE_ON:
rc |= register_trace_kfree_skb(trace_kfree_skb_hit);
rc |= register_trace_napi_poll(trace_napi_poll_hit);
rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
break;
case TRACE_OFF:
rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit);
rc |= unregister_trace_napi_poll(trace_napi_poll_hit);
rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
tracepoint_synchronize_unregister();
......
......@@ -7,7 +7,5 @@
DECLARE_TRACE(subsys_event,
TP_PROTO(struct inode *inode, struct file *file),
TP_ARGS(inode, file));
DECLARE_TRACE(subsys_eventb,
TP_PROTO(void),
TP_ARGS());
DECLARE_TRACE_NOARGS(subsys_eventb);
#endif
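DECLARE_TRACE_NOARGS is used here presumably because, with the probe data pointer introduced in this series, even an argument-less tracepoint's probe now takes one parameter (the private data), so a plain TP_PROTO(void) prototype can no longer be extended; the probe_subsys_eventb(void *ignore) change in the next hunk is the matching consumer.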
......@@ -13,7 +13,8 @@
* Here the caller only guarantees locking for struct file and struct inode.
* Locking must therefore be done in the probe to use the dentry.
*/
static void probe_subsys_event(struct inode *inode, struct file *file)
static void probe_subsys_event(void *ignore,
struct inode *inode, struct file *file)
{
path_get(&file->f_path);
dget(file->f_path.dentry);
......@@ -23,7 +24,7 @@ static void probe_subsys_event(struct inode *inode, struct file *file)
path_put(&file->f_path);
}
static void probe_subsys_eventb(void)
static void probe_subsys_eventb(void *ignore)
{
printk(KERN_INFO "Event B is encountered\n");
}
......@@ -32,9 +33,9 @@ static int __init tp_sample_trace_init(void)
{
int ret;
ret = register_trace_subsys_event(probe_subsys_event);
ret = register_trace_subsys_event(probe_subsys_event, NULL);
WARN_ON(ret);
ret = register_trace_subsys_eventb(probe_subsys_eventb);
ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL);
WARN_ON(ret);
return 0;
......@@ -44,8 +45,8 @@ module_init(tp_sample_trace_init);
static void __exit tp_sample_trace_exit(void)
{
unregister_trace_subsys_eventb(probe_subsys_eventb);
unregister_trace_subsys_event(probe_subsys_event);
unregister_trace_subsys_eventb(probe_subsys_eventb, NULL);
unregister_trace_subsys_event(probe_subsys_event, NULL);
tracepoint_synchronize_unregister();
}
......
......@@ -12,7 +12,8 @@
* Here the caller only guarantees locking for struct file and struct inode.
* Locking must therefore be done in the probe to use the dentry.
*/
static void probe_subsys_event(struct inode *inode, struct file *file)
static void probe_subsys_event(void *ignore,
struct inode *inode, struct file *file)
{
printk(KERN_INFO "Event is encountered with inode number %lu\n",
inode->i_ino);
......@@ -22,7 +23,7 @@ static int __init tp_sample_trace_init(void)
{
int ret;
ret = register_trace_subsys_event(probe_subsys_event);
ret = register_trace_subsys_event(probe_subsys_event, NULL);
WARN_ON(ret);
return 0;
......@@ -32,7 +33,7 @@ module_init(tp_sample_trace_init);
static void __exit tp_sample_trace_exit(void)
{
unregister_trace_subsys_event(probe_subsys_event);
unregister_trace_subsys_event(probe_subsys_event, NULL);
tracepoint_synchronize_unregister();
}
......
......@@ -43,6 +43,9 @@ OPTIONS
-c::
scale counter values
-B::
print large numbers with thousands' separators according to locale
EXAMPLES
--------
......
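In other words, with -B a raw count such as 1234567 would be printed as 1,234,567 (or whatever grouping the user's locale defines), which is all that "thousands' separators" refers to here.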
......@@ -277,7 +277,7 @@ static void hist_entry__print_hits(struct hist_entry *self)
printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum);
}
static void annotate_sym(struct hist_entry *he)
static int hist_entry__tty_annotate(struct hist_entry *he)
{
struct map *map = he->ms.map;
struct dso *dso = map->dso;
......@@ -288,7 +288,7 @@ static void annotate_sym(struct hist_entry *he)
struct objdump_line *pos, *n;
if (hist_entry__annotate(he, &head) < 0)
return;
return -1;
if (full_paths)
d_filename = filename;
......@@ -317,30 +317,59 @@ static void annotate_sym(struct hist_entry *he)
if (print_line)
free_source_line(he, len);
return 0;
}
static void hists__find_annotations(struct hists *self)
{
struct rb_node *nd;
struct rb_node *first = rb_first(&self->entries), *nd = first;
int key = KEY_RIGHT;
for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
struct sym_priv *priv;
if (he->ms.sym == NULL)
continue;
if (he->ms.sym == NULL || he->ms.map->dso->annotate_warned)
goto find_next;
priv = symbol__priv(he->ms.sym);
if (priv->hist == NULL)
if (priv->hist == NULL) {
find_next:
if (key == KEY_LEFT)
nd = rb_prev(nd);
else
nd = rb_next(nd);
continue;
}
annotate_sym(he);
/*
* Since we have a hist_entry per IP for the same symbol, free
* he->ms.sym->hist to signal we already processed this symbol.
*/
free(priv->hist);
priv->hist = NULL;
if (use_browser) {
key = hist_entry__tui_annotate(he);
if (is_exit_key(key))
break;
switch (key) {
case KEY_RIGHT:
case '\t':
nd = rb_next(nd);
break;
case KEY_LEFT:
if (nd == first)
continue;
nd = rb_prev(nd);
default:
break;
}
} else {
hist_entry__tty_annotate(he);
nd = rb_next(nd);
/*
* Since we have a hist_entry per IP for the same
* symbol, free he->ms.sym->hist to signal we already
* processed this symbol.
*/
free(priv->hist);
priv->hist = NULL;
}
}
}
......@@ -416,6 +445,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
{
argc = parse_options(argc, argv, options, annotate_usage, 0);
setup_browser();
symbol_conf.priv_size = sizeof(struct sym_priv);
symbol_conf.try_vmlinux_path = true;
......@@ -435,8 +466,6 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
sym_hist_filter = argv[0];
}
setup_pager();
if (field_sep && *field_sep == '.') {
pr_err("'.' is the only non valid --field-separator argument\n");
return -1;
......
......@@ -65,8 +65,10 @@ static int parse_probe_event(const char *str)
int ret;
pr_debug("probe-definition(%d): %s\n", params.nevents, str);
if (++params.nevents == MAX_PROBES)
die("Too many probes (> %d) are specified.", MAX_PROBES);
if (++params.nevents == MAX_PROBES) {
pr_err("Too many probes (> %d) were specified.", MAX_PROBES);
return -1;
}
/* Parse a perf-probe command into event */
ret = parse_perf_probe_command(str, pev);
......@@ -84,7 +86,9 @@ static int parse_probe_event_argv(int argc, const char **argv)
len = 0;
for (i = 0; i < argc; i++)
len += strlen(argv[i]) + 1;
buf = xzalloc(len + 1);
buf = zalloc(len + 1);
if (buf == NULL)
return -ENOMEM;
len = 0;
for (i = 0; i < argc; i++)
len += sprintf(&buf[len], "%s ", argv[i]);
......
......@@ -25,6 +25,7 @@
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
enum write_mode_t {
WRITE_FORCE,
......@@ -60,13 +61,8 @@ static bool call_graph = false;
static bool inherit_stat = false;
static bool no_samples = false;
static bool sample_address = false;
static bool multiplex = false;
static int multiplex_fd = -1;
static long samples = 0;
static struct timeval last_read;
static struct timeval this_read;
static u64 bytes_written = 0;
static struct pollfd *event_array;
......@@ -86,7 +82,7 @@ struct mmap_data {
unsigned int prev;
};
static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS];
static unsigned long mmap_read_head(struct mmap_data *md)
{
......@@ -146,8 +142,6 @@ static void mmap_read(struct mmap_data *md)
void *buf;
int diff;
gettimeofday(&this_read, NULL);
/*
* If we're further behind than half the buffer, there's a chance
* the writer will bite our tail and mess up the samples under us.
......@@ -158,23 +152,13 @@ static void mmap_read(struct mmap_data *md)
*/
diff = head - old;
if (diff < 0) {
struct timeval iv;
unsigned long msecs;
timersub(&this_read, &last_read, &iv);
msecs = iv.tv_sec*1000 + iv.tv_usec/1000;
fprintf(stderr, "WARNING: failed to keep up with mmap data."
" Last read %lu msecs ago.\n", msecs);
fprintf(stderr, "WARNING: failed to keep up with mmap data\n");
/*
* head points to a known good entry, start there.
*/
old = head;
}
last_read = this_read;
if (old != head)
samples++;
......@@ -380,27 +364,30 @@ static void create_counter(int counter, int cpu)
*/
if (group && group_fd == -1)
group_fd = fd[nr_cpu][counter][thread_index];
if (multiplex && multiplex_fd == -1)
multiplex_fd = fd[nr_cpu][counter][thread_index];
if (multiplex && fd[nr_cpu][counter][thread_index] != multiplex_fd) {
ret = ioctl(fd[nr_cpu][counter][thread_index], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
assert(ret != -1);
if (counter || thread_index) {
ret = ioctl(fd[nr_cpu][counter][thread_index],
PERF_EVENT_IOC_SET_OUTPUT,
fd[nr_cpu][0][0]);
if (ret) {
error("failed to set output: %d (%s)\n", errno,
strerror(errno));
exit(-1);
}
} else {
event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index];
event_array[nr_poll].events = POLLIN;
nr_poll++;
mmap_array[nr_cpu][counter][thread_index].counter = counter;
mmap_array[nr_cpu][counter][thread_index].prev = 0;
mmap_array[nr_cpu][counter][thread_index].mask = mmap_pages*page_size - 1;
mmap_array[nr_cpu][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
mmap_array[nr_cpu].counter = counter;
mmap_array[nr_cpu].prev = 0;
mmap_array[nr_cpu].mask = mmap_pages*page_size - 1;
mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size,
PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter][thread_index], 0);
if (mmap_array[nr_cpu][counter][thread_index].base == MAP_FAILED) {
if (mmap_array[nr_cpu].base == MAP_FAILED) {
error("failed to mmap with %d (%s)\n", errno, strerror(errno));
exit(-1);
}
event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index];
event_array[nr_poll].events = POLLIN;
nr_poll++;
}
if (filter != NULL) {
......@@ -501,16 +488,11 @@ static struct perf_event_header finished_round_event = {
static void mmap_read_all(void)
{
int i, counter, thread;
int i;
for (i = 0; i < nr_cpu; i++) {
for (counter = 0; counter < nr_counters; counter++) {
for (thread = 0; thread < thread_num; thread++) {
if (mmap_array[i][counter][thread].base)
mmap_read(&mmap_array[i][counter][thread]);
}
}
if (mmap_array[i].base)
mmap_read(&mmap_array[i]);
}
if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
......@@ -834,8 +816,6 @@ static const struct option options[] = {
"Sample addresses"),
OPT_BOOLEAN('n', "no-samples", &no_samples,
"don't sample"),
OPT_BOOLEAN('M', "multiplex", &multiplex,
"multiplex counter output in a single channel"),
OPT_END()
};
......@@ -887,9 +867,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
for (i = 0; i < MAX_NR_CPUS; i++) {
for (j = 0; j < MAX_COUNTERS; j++) {
fd[i][j] = malloc(sizeof(int)*thread_num);
mmap_array[i][j] = zalloc(
sizeof(struct mmap_data)*thread_num);
if (!fd[i][j] || !mmap_array[i][j])
if (!fd[i][j])
return -ENOMEM;
}
}
......
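The buffer sharing above comes down to a single ioctl: every event fd after the first one opened on a CPU is redirected into that first fd's ring buffer, so only one mmap per CPU remains. A minimal user-space sketch of the redirect (the helper name and fd variables are illustrative):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Make samples from 'fd' land in 'leader_fd's mmap'ed ring buffer. */
static int sketch_share_buffer(int fd, int leader_fd)
{
	return ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, leader_fd);
}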
......@@ -15,15 +15,15 @@
#include "util/parse-events.h"
#include "util/debugfs.h"
bool use_browser;
const char perf_usage_string[] =
"perf [--version] [--help] COMMAND [ARGS]";
const char perf_more_info_string[] =
"See 'perf help COMMAND' for more information on a specific command.";
int use_browser = -1;
static int use_pager = -1;
struct pager_config {
const char *cmd;
int val;
......@@ -49,6 +49,24 @@ int check_pager_config(const char *cmd)
return c.val;
}
static int tui_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
if (!prefixcmp(var, "tui.") && !strcmp(var + 4, c->cmd))
c->val = perf_config_bool(var, value);
return 0;
}
/* returns 0 for "no tui", 1 for "use tui", and -1 for "not specified" */
static int check_tui_config(const char *cmd)
{
struct pager_config c;
c.cmd = cmd;
c.val = -1;
perf_config(tui_command_config, &c);
return c.val;
}
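A hedged reading of the lookup above: perf_config() presents INI-style sections as dotted keys, so a ~/.perfconfig stanza such as a [tui] section containing "report = off" should reach tui_command_config() as the variable "tui.report", making check_tui_config("report") return 0 and keep the TUI disabled for that one command while leaving the others at the -1 "not specified" default.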
static void commit_pager_choice(void)
{
switch (use_pager) {
......@@ -255,6 +273,9 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
if (p->option & RUN_SETUP)
prefix = NULL; /* setup_perf_directory(); */
if (use_browser == -1)
use_browser = check_tui_config(p->cmd);
if (use_pager == -1 && p->option & RUN_SETUP)
use_pager = check_pager_config(p->cmd);
if (use_pager == -1 && p->option & USE_PAGER)
......
......@@ -5,4 +5,6 @@
extern struct perf_event_ops build_id__mark_dso_hit_ops;
char *dso__build_id_filename(struct dso *self, char *bf, size_t size);
#endif