Commit 32673822 authored by Ingo Molnar's avatar Ingo Molnar

Merge branch 'tip/perf/core' of...

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core

Conflicts:
	include/linux/perf_event.h

Merge reason: pick up the latest jump-label enhancements; they are fully cooked and ready.
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parents fa7b6947 5373db88
...@@ -20,16 +20,18 @@ ...@@ -20,16 +20,18 @@
#define WORD_INSN ".word" #define WORD_INSN ".word"
#endif #endif
#define JUMP_LABEL(key, label) \ static __always_inline bool arch_static_branch(struct jump_label_key *key)
do { \ {
asm goto("1:\tnop\n\t" \ asm goto("1:\tnop\n\t"
"nop\n\t" \ "nop\n\t"
".pushsection __jump_table, \"a\"\n\t" \ ".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[" #label "], %0\n\t" \ WORD_INSN " 1b, %l[l_yes], %0\n\t"
".popsection\n\t" \ ".popsection\n\t"
: : "i" (key) : : label); \ : : "i" (key) : : l_yes);
} while (0) return false;
l_yes:
return true;
}
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -88,6 +88,7 @@ config S390 ...@@ -88,6 +88,7 @@ config S390
select HAVE_KERNEL_XZ select HAVE_KERNEL_XZ
select HAVE_GET_USER_PAGES_FAST select HAVE_GET_USER_PAGES_FAST
select HAVE_ARCH_MUTEX_CPU_RELAX select HAVE_ARCH_MUTEX_CPU_RELAX
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_TRYLOCK_BH
select ARCH_INLINE_SPIN_LOCK select ARCH_INLINE_SPIN_LOCK
......
#ifndef _ASM_S390_JUMP_LABEL_H
#define _ASM_S390_JUMP_LABEL_H
#include <linux/types.h>
/* a "brcl 0,0" no-op occupies 6 bytes on s390 */
#define JUMP_LABEL_NOP_SIZE 6
/* pointer width / alignment used for the __jump_table records below */
#ifdef CONFIG_64BIT
#define ASM_PTR ".quad"
#define ASM_ALIGN ".balign 8"
#else
#define ASM_PTR ".long"
#define ASM_ALIGN ".balign 4"
#endif
/*
 * arch_static_branch - patchable static branch for s390.
 *
 * Emits a "brcl 0,0" no-op at local label 0 and records the triple
 * (patch-site address, branch-target address, key address) in the
 * __jump_table section, so the runtime patcher can later rewrite the
 * no-op into a branch to "label".  Returns false on the unpatched
 * fast path, true once control reaches the patched-in label.
 */
static __always_inline bool arch_static_branch(struct jump_label_key *key)
{
asm goto("0: brcl 0,0\n"
".pushsection __jump_table, \"aw\"\n"
ASM_ALIGN "\n"
ASM_PTR " 0b, %l[label], %0\n"
".popsection\n"
: : "X" (key) : : label);
return false;
label:
return true;
}
/* jump-table fields are kernel virtual addresses */
typedef unsigned long jump_label_t;
/*
 * One __jump_table record: the patch-site address, the branch target,
 * and the address of the controlling jump_label_key.
 */
struct jump_entry {
jump_label_t code;
jump_label_t target;
jump_label_t key;
};
#endif
...@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w ...@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \
processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
vdso.o vtime.o sysinfo.o nmi.o sclp.o vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
......
/*
* Jump label s390 support
*
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <linux/jump_label.h>
#include <asm/ipl.h>
#ifdef HAVE_JUMP_LABEL
/*
 * Layout of the 6-byte "brcl" instruction being patched: a 16-bit
 * opcode followed by a signed 32-bit offset (counted in halfwords).
 */
struct insn {
u16 opcode;
s32 offset;
} __packed;
/* Arguments passed through stop_machine() to the patching callback. */
struct insn_args {
unsigned long *target;	/* address of the instruction to rewrite */
struct insn *insn;	/* replacement instruction */
ssize_t size;		/* number of bytes to write */
};
/*
 * stop_machine() callback: copy the replacement instruction described
 * by *data over the patch site.  Always reports success to
 * stop_machine(); a failed text write only triggers a one-shot warning.
 */
static int __arch_jump_label_transform(void *data)
{
	struct insn_args *a = data;

	/* probe_kernel_write() handles faults on the text mapping */
	WARN_ON_ONCE(probe_kernel_write(a->target, a->insn, a->size) < 0);
	return 0;
}
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
struct insn_args args;
struct insn insn;
if (type == JUMP_LABEL_ENABLE) {
/* brcl 15,offset */
insn.opcode = 0xc0f4;
insn.offset = (entry->target - entry->code) >> 1;
} else {
/* brcl 0,0 */
insn.opcode = 0xc004;
insn.offset = 0;
}
args.target = (void *) entry->code;
args.insn = &insn;
args.size = JUMP_LABEL_NOP_SIZE;
stop_machine(__arch_jump_label_transform, &args, NULL);
}
#endif
...@@ -7,17 +7,20 @@ ...@@ -7,17 +7,20 @@
#define JUMP_LABEL_NOP_SIZE 4 #define JUMP_LABEL_NOP_SIZE 4
#define JUMP_LABEL(key, label) \ static __always_inline bool arch_static_branch(struct jump_label_key *key)
do { \ {
asm goto("1:\n\t" \ asm goto("1:\n\t"
"nop\n\t" \ "nop\n\t"
"nop\n\t" \ "nop\n\t"
".pushsection __jump_table, \"a\"\n\t"\ ".pushsection __jump_table, \"aw\"\n\t"
".align 4\n\t" \ ".align 4\n\t"
".word 1b, %l[" #label "], %c0\n\t" \ ".word 1b, %l[l_yes], %c0\n\t"
".popsection \n\t" \ ".popsection \n\t"
: : "i" (key) : : label);\ : : "i" (key) : : l_yes);
} while (0) return false;
l_yes:
return true;
}
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -4,7 +4,6 @@ ...@@ -4,7 +4,6 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/stringify.h> #include <linux/stringify.h>
#include <linux/jump_label.h>
#include <asm/asm.h> #include <asm/asm.h>
/* /*
...@@ -191,7 +190,7 @@ extern void *text_poke(void *addr, const void *opcode, size_t len); ...@@ -191,7 +190,7 @@ extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_smp(void *addr, const void *opcode, size_t len); extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
extern void text_poke_smp_batch(struct text_poke_param *params, int n); extern void text_poke_smp_batch(struct text_poke_param *params, int n);
#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL)
#define IDEAL_NOP_SIZE_5 5 #define IDEAL_NOP_SIZE_5 5
extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
extern void arch_init_ideal_nop5(void); extern void arch_init_ideal_nop5(void);
......
...@@ -5,20 +5,25 @@ ...@@ -5,20 +5,25 @@
#include <linux/types.h> #include <linux/types.h>
#include <asm/nops.h> #include <asm/nops.h>
#include <asm/asm.h>
#define JUMP_LABEL_NOP_SIZE 5 #define JUMP_LABEL_NOP_SIZE 5
# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" #define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
# define JUMP_LABEL(key, label) \ static __always_inline bool arch_static_branch(struct jump_label_key *key)
do { \ {
asm goto("1:" \ asm goto("1:"
JUMP_LABEL_INITIAL_NOP \ JUMP_LABEL_INITIAL_NOP
".pushsection __jump_table, \"aw\" \n\t"\ ".pushsection __jump_table, \"aw\" \n\t"
_ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ _ASM_ALIGN "\n\t"
".popsection \n\t" \ _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
: : "i" (key) : : label); \ ".popsection \n\t"
} while (0) : : "i" (key) : : l_yes);
return false;
l_yes:
return true;
}
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -679,7 +679,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) ...@@ -679,7 +679,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
__stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
} }
#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL)
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/jump_label.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/page.h> #include <asm/page.h>
......
...@@ -171,6 +171,10 @@ ...@@ -171,6 +171,10 @@
*(__tracepoints) \ *(__tracepoints) \
/* implement dynamic printk debug */ \ /* implement dynamic printk debug */ \
. = ALIGN(8); \ . = ALIGN(8); \
VMLINUX_SYMBOL(__start___jump_table) = .; \
*(__jump_table) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \ VMLINUX_SYMBOL(__start___verbose) = .; \
*(__verbose) \ *(__verbose) \
VMLINUX_SYMBOL(__stop___verbose) = .; \ VMLINUX_SYMBOL(__stop___verbose) = .; \
...@@ -228,8 +232,6 @@ ...@@ -228,8 +232,6 @@
\ \
BUG_TABLE \ BUG_TABLE \
\ \
JUMP_TABLE \
\
/* PCI quirks */ \ /* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
...@@ -589,14 +591,6 @@ ...@@ -589,14 +591,6 @@
#define BUG_TABLE #define BUG_TABLE
#endif #endif
#define JUMP_TABLE \
. = ALIGN(8); \
__jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___jump_table) = .; \
*(__jump_table) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
}
#ifdef CONFIG_PM_TRACE #ifdef CONFIG_PM_TRACE
#define TRACEDATA \ #define TRACEDATA \
. = ALIGN(4); \ . = ALIGN(4); \
......
#ifndef _DYNAMIC_DEBUG_H #ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H #define _DYNAMIC_DEBUG_H
#include <linux/jump_label.h>
/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
* bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
* use independent hash functions, to reduce the chance of false positives. * use independent hash functions, to reduce the chance of false positives.
......
#ifndef _LINUX_JUMP_LABEL_H #ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H #define _LINUX_JUMP_LABEL_H
#include <linux/types.h>
#include <linux/compiler.h>
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
/*
 * Controlling key of a static branch: the branch is considered enabled
 * while the reference count is non-zero.
 */
struct jump_label_key {
atomic_t enabled;	/* reference count, managed by jump_label_inc/dec */
struct jump_entry *entries;	/* patch sites toggled by this key */
#ifdef CONFIG_MODULES
struct jump_label_mod *next;	/* chain of module-owned patch sites */
#endif
};
# include <asm/jump_label.h> # include <asm/jump_label.h>
# define HAVE_JUMP_LABEL # define HAVE_JUMP_LABEL
#endif #endif
enum jump_label_type { enum jump_label_type {
JUMP_LABEL_DISABLE = 0,
JUMP_LABEL_ENABLE, JUMP_LABEL_ENABLE,
JUMP_LABEL_DISABLE
}; };
struct module; struct module;
#ifdef HAVE_JUMP_LABEL #ifdef HAVE_JUMP_LABEL
#ifdef CONFIG_MODULES
#define JUMP_LABEL_INIT {{ 0 }, NULL, NULL}
#else
#define JUMP_LABEL_INIT {{ 0 }, NULL}
#endif
static __always_inline bool static_branch(struct jump_label_key *key)
{
return arch_static_branch(key);
}
extern struct jump_entry __start___jump_table[]; extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[]; extern struct jump_entry __stop___jump_table[];
...@@ -23,37 +46,37 @@ extern void jump_label_unlock(void); ...@@ -23,37 +46,37 @@ extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry, extern void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type); enum jump_label_type type);
extern void arch_jump_label_text_poke_early(jump_label_t addr); extern void arch_jump_label_text_poke_early(jump_label_t addr);
extern void jump_label_update(unsigned long key, enum jump_label_type type);
extern void jump_label_apply_nops(struct module *mod);
extern int jump_label_text_reserved(void *start, void *end); extern int jump_label_text_reserved(void *start, void *end);
extern void jump_label_inc(struct jump_label_key *key);
extern void jump_label_dec(struct jump_label_key *key);
extern bool jump_label_enabled(struct jump_label_key *key);
extern void jump_label_apply_nops(struct module *mod);
#define jump_label_enable(key) \ #else
jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
#define jump_label_disable(key) \ #include <asm/atomic.h>
jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
#else #define JUMP_LABEL_INIT {ATOMIC_INIT(0)}
#define JUMP_LABEL(key, label) \ struct jump_label_key {
do { \ atomic_t enabled;
if (unlikely(*key)) \ };
goto label; \
} while (0)
#define jump_label_enable(cond_var) \ static __always_inline bool static_branch(struct jump_label_key *key)
do { \ {
*(cond_var) = 1; \ if (unlikely(atomic_read(&key->enabled)))
} while (0) return true;
return false;
}
#define jump_label_disable(cond_var) \ static inline void jump_label_inc(struct jump_label_key *key)
do { \ {
*(cond_var) = 0; \ atomic_inc(&key->enabled);
} while (0) }
static inline int jump_label_apply_nops(struct module *mod) static inline void jump_label_dec(struct jump_label_key *key)
{ {
return 0; atomic_dec(&key->enabled);
} }
static inline int jump_label_text_reserved(void *start, void *end) static inline int jump_label_text_reserved(void *start, void *end)
...@@ -64,16 +87,16 @@ static inline int jump_label_text_reserved(void *start, void *end) ...@@ -64,16 +87,16 @@ static inline int jump_label_text_reserved(void *start, void *end)
static inline void jump_label_lock(void) {} static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {} static inline void jump_label_unlock(void) {}
#endif static inline bool jump_label_enabled(struct jump_label_key *key)
{
return !!atomic_read(&key->enabled);
}
#define COND_STMT(key, stmt) \ static inline int jump_label_apply_nops(struct module *mod)
do { \ {
__label__ jl_enabled; \ return 0;
JUMP_LABEL(key, jl_enabled); \ }
if (0) { \
jl_enabled: \ #endif
stmt; \
} \
} while (0)
#endif #endif
#ifndef _LINUX_JUMP_LABEL_REF_H
#define _LINUX_JUMP_LABEL_REF_H
/*
 * Reference-counted wrappers around the jump-label API: the branch is
 * enabled while the atomic count is non-zero.
 */
#include <linux/jump_label.h>
#include <asm/atomic.h>
#ifdef HAVE_JUMP_LABEL
/* 0 -> 1 transition: patch the branch in */
static inline void jump_label_inc(atomic_t *key)
{
if (atomic_add_return(1, key) == 1)
jump_label_enable(key);
}
/* 1 -> 0 transition: patch the branch back out */
static inline void jump_label_dec(atomic_t *key)
{
if (atomic_dec_and_test(key))
jump_label_disable(key);
}
#else /* !HAVE_JUMP_LABEL */
/* fallback: plain reference counting, tested at run time */
static inline void jump_label_inc(atomic_t *key)
{
atomic_inc(key);
}
static inline void jump_label_dec(atomic_t *key)
{
atomic_dec(key);
}
/*
 * Without asm goto support JUMP_LABEL degrades to a runtime test of
 * the key.  The __builtin_choose_expr construct accepts either an
 * atomic_t * or a plain integer pointer without type warnings.
 */
#undef JUMP_LABEL
#define JUMP_LABEL(key, label) \
do { \
if (unlikely(__builtin_choose_expr( \
__builtin_types_compatible_p(typeof(key), atomic_t *), \
atomic_read((atomic_t *)(key)), *(key)))) \
goto label; \
} while (0)
#endif /* HAVE_JUMP_LABEL */
#endif /* _LINUX_JUMP_LABEL_REF_H */
...@@ -505,7 +505,7 @@ struct perf_guest_info_callbacks { ...@@ -505,7 +505,7 @@ struct perf_guest_info_callbacks {
#include <linux/ftrace.h> #include <linux/ftrace.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/irq_work.h> #include <linux/irq_work.h>
#include <linux/jump_label_ref.h> #include <linux/jump_label.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/local.h> #include <asm/local.h>
...@@ -1034,7 +1034,7 @@ static inline int is_software_event(struct perf_event *event) ...@@ -1034,7 +1034,7 @@ static inline int is_software_event(struct perf_event *event)
return event->pmu->task_ctx_nr == perf_sw_context; return event->pmu->task_ctx_nr == perf_sw_context;
} }
extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
...@@ -1063,22 +1063,21 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) ...@@ -1063,22 +1063,21 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{ {
struct pt_regs hot_regs; struct pt_regs hot_regs;
JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); if (static_branch(&perf_swevent_enabled[event_id])) {
return;
have_event:
if (!regs) { if (!regs) {
perf_fetch_caller_regs(&hot_regs); perf_fetch_caller_regs(&hot_regs);
regs = &hot_regs; regs = &hot_regs;
} }
__perf_sw_event(event_id, nr, nmi, regs, addr); __perf_sw_event(event_id, nr, nmi, regs, addr);
}
} }
extern atomic_t perf_sched_events; extern struct jump_label_key perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *task) static inline void perf_event_task_sched_in(struct task_struct *task)
{ {
COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task)); if (static_branch(&perf_sched_events))
__perf_event_task_sched_in(task);
} }
static inline static inline
......
...@@ -29,7 +29,7 @@ struct tracepoint_func { ...@@ -29,7 +29,7 @@ struct tracepoint_func {
struct tracepoint { struct tracepoint {
const char *name; /* Tracepoint name */ const char *name; /* Tracepoint name */
int state; /* State. */ struct jump_label_key key;
void (*regfunc)(void); void (*regfunc)(void);
void (*unregfunc)(void); void (*unregfunc)(void);
struct tracepoint_func __rcu *funcs; struct tracepoint_func __rcu *funcs;
...@@ -146,9 +146,7 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin, ...@@ -146,9 +146,7 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin,
extern struct tracepoint __tracepoint_##name; \ extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \ static inline void trace_##name(proto) \
{ \ { \
JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ if (static_branch(&__tracepoint_##name.key)) \
return; \
do_trace: \
__DO_TRACE(&__tracepoint_##name, \ __DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \ TP_PROTO(data_proto), \
TP_ARGS(data_args), \ TP_ARGS(data_args), \
...@@ -181,7 +179,7 @@ do_trace: \ ...@@ -181,7 +179,7 @@ do_trace: \
__attribute__((section("__tracepoints_strings"))) = #name; \ __attribute__((section("__tracepoints_strings"))) = #name; \
struct tracepoint __tracepoint_##name \ struct tracepoint __tracepoint_##name \
__attribute__((section("__tracepoints"))) = \ __attribute__((section("__tracepoints"))) = \
{ __tpstrtab_##name, 0, reg, unreg, NULL }; \ { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\
static struct tracepoint * const __tracepoint_ptr_##name __used \ static struct tracepoint * const __tracepoint_ptr_##name __used \
__attribute__((section("__tracepoints_ptrs"))) = \ __attribute__((section("__tracepoints_ptrs"))) = \
&__tracepoint_##name; &__tracepoint_##name;
......
This diff is collapsed.
...@@ -125,7 +125,7 @@ enum event_type_t { ...@@ -125,7 +125,7 @@ enum event_type_t {
* perf_sched_events : >0 events exist * perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/ */
atomic_t perf_sched_events __read_mostly; struct jump_label_key perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static atomic_t nr_mmap_events __read_mostly; static atomic_t nr_mmap_events __read_mostly;
...@@ -5429,7 +5429,7 @@ static int swevent_hlist_get(struct perf_event *event) ...@@ -5429,7 +5429,7 @@ static int swevent_hlist_get(struct perf_event *event)
return err; return err;
} }
atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
static void sw_perf_event_destroy(struct perf_event *event) static void sw_perf_event_destroy(struct perf_event *event)
{ {
......
...@@ -2013,9 +2013,10 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) ...@@ -2013,9 +2013,10 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
{ {
enum print_line_t ret; enum print_line_t ret;
if (iter->lost_events) if (iter->lost_events &&
trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
iter->cpu, iter->lost_events); iter->cpu, iter->lost_events))
return TRACE_TYPE_PARTIAL_LINE;
if (iter->trace && iter->trace->print_line) { if (iter->trace && iter->trace->print_line) {
ret = iter->trace->print_line(iter); ret = iter->trace->print_line(iter);
...@@ -3229,6 +3230,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, ...@@ -3229,6 +3230,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
if (iter->seq.len >= cnt) if (iter->seq.len >= cnt)
break; break;
/*
* Setting the full flag means we reached the trace_seq buffer
* size and we should leave by partial output condition above.
* One of the trace_seq_* functions is not used properly.
*/
WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
iter->ent->type);
} }
trace_access_unlock(iter->cpu_file); trace_access_unlock(iter->cpu_file);
trace_event_read_unlock(); trace_event_read_unlock();
......
...@@ -830,6 +830,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event); ...@@ -830,6 +830,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event);
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
struct trace_event *event) struct trace_event *event)
{ {
if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED; return TRACE_TYPE_HANDLED;
} }
......
...@@ -32,7 +32,7 @@ static DEFINE_MUTEX(btrace_mutex); ...@@ -32,7 +32,7 @@ static DEFINE_MUTEX(btrace_mutex);
struct trace_bprintk_fmt { struct trace_bprintk_fmt {
struct list_head list; struct list_head list;
char fmt[0]; const char *fmt;
}; };
static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
...@@ -49,6 +49,7 @@ static ...@@ -49,6 +49,7 @@ static
void hold_module_trace_bprintk_format(const char **start, const char **end) void hold_module_trace_bprintk_format(const char **start, const char **end)
{ {
const char **iter; const char **iter;
char *fmt;
mutex_lock(&btrace_mutex); mutex_lock(&btrace_mutex);
for (iter = start; iter < end; iter++) { for (iter = start; iter < end; iter++) {
...@@ -58,15 +59,19 @@ void hold_module_trace_bprintk_format(const char **start, const char **end) ...@@ -58,15 +59,19 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
continue; continue;
} }
tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt) tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL);
+ strlen(*iter) + 1, GFP_KERNEL); if (tb_fmt)
if (tb_fmt) { fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL);
if (tb_fmt && fmt) {
list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list); list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
strcpy(tb_fmt->fmt, *iter); strcpy(fmt, *iter);
tb_fmt->fmt = fmt;
*iter = tb_fmt->fmt; *iter = tb_fmt->fmt;
} else } else {
kfree(tb_fmt);
*iter = NULL; *iter = NULL;
} }
}
mutex_unlock(&btrace_mutex); mutex_unlock(&btrace_mutex);
} }
...@@ -84,6 +89,76 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self, ...@@ -84,6 +89,76 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self,
return 0; return 0;
} }
/*
* The debugfs/tracing/printk_formats file maps the addresses with
* the ASCII formats that are used in the bprintk events in the
* buffer. For userspace tools to be able to decode the events from
* the buffer, they need to be able to map the address with the format.
*
* The addresses of the bprintk formats are in their own section
* __trace_printk_fmt. But for modules we copy them into a link list.
* The code to print the formats and their addresses passes around the
* address of the fmt string. If the fmt address passed into the seq
* functions is within the kernel core __trace_printk_fmt section, then
* it simply uses the next pointer in the list.
*
* When the fmt pointer is outside the kernel core __trace_printk_fmt
* section, then we need to read the link list pointers. The trick is
* we pass the address of the string to the seq function just like
* we do for the kernel core formats. To get back the structure that
* holds the format, we simply use containerof() and then go to the
* next format in the list.
*/
/*
 * find_next_mod_format - continue the printk_formats walk into the
 * per-module format list once the core __trace_printk_fmt section is
 * exhausted.
 *
 * @start_index: number of entries in the core section, i.e. the *pos
 *		 value at which the module list begins
 * @v:		 NULL when called from t_start(), otherwise the &fmt
 *		 pointer of the previously returned module record
 * @fmt:	 not referenced in this function -- NOTE(review): kept
 *		 for symmetry with the core-section path; confirm before
 *		 removing
 * @pos:	 current sequence position
 *
 * Returns a pointer to the record's fmt field, or NULL when the walk
 * is finished.  Caller must hold btrace_mutex (format_mod_start()).
 */
static const char **
find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos)
{
struct trace_bprintk_fmt *mod_fmt;
if (list_empty(&trace_bprintk_fmt_list))
return NULL;
/*
 * v will point to the address of the fmt record from t_next
 * v will be NULL from t_start.
 * If this is the first pointer or called from start
 * then we need to walk the list.
 */
if (!v || start_index == *pos) {
struct trace_bprintk_fmt *p;
/* search the module list */
list_for_each_entry(p, &trace_bprintk_fmt_list, list) {
if (start_index == *pos)
return &p->fmt;
start_index++;
}
/* pos > index */
return NULL;
}
/*
 * v points to the address of the fmt field in the mod list
 * structure that holds the module print format.
 */
mod_fmt = container_of(v, typeof(*mod_fmt), fmt);
if (mod_fmt->list.next == &trace_bprintk_fmt_list)
return NULL;
/* advance to the next module record and hand back its fmt field */
mod_fmt = container_of(mod_fmt->list.next, typeof(*mod_fmt), list);
return &mod_fmt->fmt;
}
/* Serialize the printk_formats seq walk against module format updates. */
static void format_mod_start(void)
{
mutex_lock(&btrace_mutex);
}
/* Drop the lock taken by format_mod_start(). */
static void format_mod_stop(void)
{
mutex_unlock(&btrace_mutex);
}
#else /* !CONFIG_MODULES */ #else /* !CONFIG_MODULES */
__init static int __init static int
module_trace_bprintk_format_notify(struct notifier_block *self, module_trace_bprintk_format_notify(struct notifier_block *self,
...@@ -91,6 +166,13 @@ module_trace_bprintk_format_notify(struct notifier_block *self, ...@@ -91,6 +166,13 @@ module_trace_bprintk_format_notify(struct notifier_block *self,
{ {
return 0; return 0;
} }
static inline const char **
find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos)
{
return NULL;
}
static inline void format_mod_start(void) { }
static inline void format_mod_stop(void) { }
#endif /* CONFIG_MODULES */ #endif /* CONFIG_MODULES */
...@@ -153,20 +235,33 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) ...@@ -153,20 +235,33 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
} }
EXPORT_SYMBOL_GPL(__ftrace_vprintk); EXPORT_SYMBOL_GPL(__ftrace_vprintk);
static void * static const char **find_next(void *v, loff_t *pos)
t_start(struct seq_file *m, loff_t *pos)
{ {
const char **fmt = __start___trace_bprintk_fmt + *pos; const char **fmt = v;
int start_index;
if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt) if (!fmt)
return NULL; fmt = __start___trace_bprintk_fmt + *pos;
start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
if (*pos < start_index)
return fmt; return fmt;
return find_next_mod_format(start_index, v, fmt, pos);
}
static void *
t_start(struct seq_file *m, loff_t *pos)
{
format_mod_start();
return find_next(NULL, pos);
} }
static void *t_next(struct seq_file *m, void * v, loff_t *pos) static void *t_next(struct seq_file *m, void * v, loff_t *pos)
{ {
(*pos)++; (*pos)++;
return t_start(m, pos); return find_next(v, pos);
} }
static int t_show(struct seq_file *m, void *v) static int t_show(struct seq_file *m, void *v)
...@@ -205,6 +300,7 @@ static int t_show(struct seq_file *m, void *v) ...@@ -205,6 +300,7 @@ static int t_show(struct seq_file *m, void *v)
static void t_stop(struct seq_file *m, void *p) static void t_stop(struct seq_file *m, void *p)
{ {
format_mod_stop();
} }
static const struct seq_operations show_format_seq_ops = { static const struct seq_operations show_format_seq_ops = {
......
...@@ -251,9 +251,9 @@ static void set_tracepoint(struct tracepoint_entry **entry, ...@@ -251,9 +251,9 @@ static void set_tracepoint(struct tracepoint_entry **entry,
{ {
WARN_ON(strcmp((*entry)->name, elem->name) != 0); WARN_ON(strcmp((*entry)->name, elem->name) != 0);
if (elem->regfunc && !elem->state && active) if (elem->regfunc && !jump_label_enabled(&elem->key) && active)
elem->regfunc(); elem->regfunc();
else if (elem->unregfunc && elem->state && !active) else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active)
elem->unregfunc(); elem->unregfunc();
/* /*
...@@ -264,13 +264,10 @@ static void set_tracepoint(struct tracepoint_entry **entry, ...@@ -264,13 +264,10 @@ static void set_tracepoint(struct tracepoint_entry **entry,
* is used. * is used.
*/ */
rcu_assign_pointer(elem->funcs, (*entry)->funcs); rcu_assign_pointer(elem->funcs, (*entry)->funcs);
if (!elem->state && active) { if (active && !jump_label_enabled(&elem->key))
jump_label_enable(&elem->state); jump_label_inc(&elem->key);
elem->state = active; else if (!active && jump_label_enabled(&elem->key))
} else if (elem->state && !active) { jump_label_dec(&elem->key);
jump_label_disable(&elem->state);
elem->state = active;
}
} }
/* /*
...@@ -281,13 +278,11 @@ static void set_tracepoint(struct tracepoint_entry **entry, ...@@ -281,13 +278,11 @@ static void set_tracepoint(struct tracepoint_entry **entry,
*/ */
static void disable_tracepoint(struct tracepoint *elem) static void disable_tracepoint(struct tracepoint *elem)
{ {
if (elem->unregfunc && elem->state) if (elem->unregfunc && jump_label_enabled(&elem->key))
elem->unregfunc(); elem->unregfunc();
if (elem->state) { if (jump_label_enabled(&elem->key))
jump_label_disable(&elem->state); jump_label_dec(&elem->key);
elem->state = 0;
}
rcu_assign_pointer(elem->funcs, NULL); rcu_assign_pointer(elem->funcs, NULL);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment