Commit 57c0c15b authored by Ingo Molnar

perf: Tidy up after the big rename

 - provide compatibility Kconfig entry for existing PERF_COUNTERS .configs

 - provide courtesy copy of old perf_counter.h, for user-space projects

 - small indentation fixups

 - fix up MAINTAINERS

 - fix small x86 printout fallout

 - fix up small PowerPC comment fallout (use 'counter' as in register)
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent cdd6c482
@@ -4000,7 +4000,7 @@ S:	Maintained
 F:	include/linux/delayacct.h
 F:	kernel/delayacct.c
 
-PERFORMANCE COUNTER SUBSYSTEM
+PERFORMANCE EVENTS SUBSYSTEM
 M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@elte.hu>
@@ -41,7 +41,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 struct power_pmu *ppmu;
 
 /*
- * Normally, to ignore kernel events we set the FCS (freeze events
+ * Normally, to ignore kernel events we set the FCS (freeze counters
  * in supervisor mode) bit in MMCR0, but if the kernel runs with the
  * hypervisor bit set in the MSR, or if we are running on a processor
  * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
@@ -159,7 +159,7 @@ void perf_event_print_debug(void)
 }
 
 /*
- * Read one performance monitor event (PMC).
+ * Read one performance monitor counter (PMC).
  */
 static unsigned long read_pmc(int idx)
 {
@@ -409,7 +409,7 @@ static void power_pmu_read(struct perf_event *event)
 		val = read_pmc(event->hw.idx);
 	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-	/* The events are only 32 bits wide */
+	/* The counters are only 32 bits wide */
 	delta = (val - prev) & 0xfffffffful;
 	atomic64_add(delta, &event->count);
 	atomic64_sub(delta, &event->hw.period_left);
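The comment corrected here records a real constraint, not just wording: the PMCs are 32-bit registers, so the delta has to be computed modulo 2^32 before it is folded into the 64-bit software count. For illustration, the same wraparound-safe accumulation pattern in plain C11, with a hypothetical read_counter32() standing in for read_pmc() (not the kernel's code):

#include <stdint.h>
#include <stdatomic.h>

/* Hypothetical 32-bit hardware counter read, standing in for read_pmc(). */
extern uint32_t read_counter32(int idx);

struct sw_counter {
	_Atomic uint64_t prev;   /* last raw 32-bit value observed */
	_Atomic uint64_t count;  /* accumulated 64-bit event count */
};

/*
 * Fold the current 32-bit reading into the 64-bit software count.
 * The subtraction is masked to 32 bits, so a counter that wrapped
 * once since the last read still yields the correct positive delta.
 */
static void counter_update(struct sw_counter *c, int idx)
{
	uint64_t prev, val, delta;

	do {
		prev = atomic_load(&c->prev);
		val  = read_counter32(idx);
	} while (!atomic_compare_exchange_strong(&c->prev, &prev, val));

	delta = (val - prev) & 0xffffffffull;
	atomic_fetch_add(&c->count, delta);
}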
@@ -543,7 +543,7 @@ void hw_perf_disable(void)
 	}
 
 		/*
-		 * Set the 'freeze events' bit.
+		 * Set the 'freeze counters' bit.
 		 * The barrier is to make sure the mtspr has been
 		 * executed and the PMU has frozen the events
 		 * before we return.
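The comment above captures an ordering requirement as well as a rename: the SPR write that sets the freeze bit must have taken effect before the function returns, so callers can rely on the counters being stopped. A rough sketch of that write-then-barrier pattern, using hypothetical accessors (mfspr_mmcr0(), mtspr_mmcr0(), sync_barrier()) and an assumed MMCR0_FC bit position; this is not the kernel's actual code:

/* Hypothetical SPR accessors; stand-ins for the real mfspr/mtspr. */
extern unsigned long mfspr_mmcr0(void);
extern void mtspr_mmcr0(unsigned long val);
extern void sync_barrier(void);		/* full execution/memory barrier */

#define MMCR0_FC	(1UL << 31)	/* assumed: 'freeze counters' bit */

static void pmu_freeze(void)
{
	/* Stop all PMCs by setting the freeze bit... */
	mtspr_mmcr0(mfspr_mmcr0() | MMCR0_FC);

	/*
	 * ...and make sure the register write has actually been executed
	 * before returning, so the caller can assume the counters are
	 * frozen from here on.
	 */
	sync_barrier();
}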
@@ -1124,7 +1124,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 }
 
 /*
- * A event has overflowed; update its count and record
+ * A counter has overflowed; update its count and record
  * things if requested. Note that interrupts are hard-disabled
  * here so there is no possibility of being interrupted.
  */
@@ -1271,7 +1271,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
 	/*
 	 * Reset MMCR0 to its normal value. This will set PMXE and
-	 * clear FC (freeze events) and PMAO (perf mon alert occurred)
+	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
 	 * and thus allow interrupts to occur again.
 	 * XXX might want to use MSR.PM to keep the events frozen until
 	 * we get back out of this interrupt.
 	 */
@@ -2083,7 +2083,7 @@ void __init init_hw_perf_events(void)
 	pr_info("... version: %d\n", x86_pmu.version);
 	pr_info("... bit width: %d\n", x86_pmu.event_bits);
-	pr_info("... generic events: %d\n", x86_pmu.num_events);
+	pr_info("... generic registers: %d\n", x86_pmu.num_events);
 	pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
 	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
 	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
@@ -395,7 +395,7 @@ enum perf_event_type {
 	 *	#
 	 *	# That is, the ABI doesn't make any promises wrt to
 	 *	# the stability of its content, it may vary depending
-	 *	# on event_id, hardware, kernel version and phase of
+	 *	# on event, hardware, kernel version and phase of
 	 *	# the moon.
 	 *	#
 	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
@@ -920,26 +920,31 @@ config HAVE_PERF_EVENTS
 	help
 	  See tools/perf/design.txt for details.
 
-menu "Performance Counters"
+menu "Kernel Performance Events And Counters"
 
 config PERF_EVENTS
-	bool "Kernel Performance Counters"
-	default y if PROFILING
+	bool "Kernel performance events and counters"
+	default y if (PROFILING || PERF_COUNTERS)
 	depends on HAVE_PERF_EVENTS
 	select ANON_INODES
 	help
-	  Enable kernel support for performance counter hardware.
+	  Enable kernel support for various performance events provided
+	  by software and hardware.
+
+	  Software events are supported either built-in or via the
+	  use of generic tracepoints.
 
-	  Performance counters are special hardware registers available
-	  on most modern CPUs. These registers count the number of certain
+	  Most modern CPUs support performance events via performance
+	  counter registers. These registers count the number of certain
 	  types of hw events: such as instructions executed, cachemisses
 	  suffered, or branches mis-predicted - without slowing down the
 	  kernel or applications. These registers can also trigger interrupts
 	  when a threshold number of events have passed - and can thus be
 	  used to profile the code that runs on that CPU.
 
-	  The Linux Performance Counter subsystem provides an abstraction of
-	  these hardware capabilities, available via a system call. It
+	  The Linux Performance Event subsystem provides an abstraction of
+	  these software and hardware event capabilities, available via a
+	  system call and used by the "perf" utility in tools/perf/. It
 	  provides per task and per CPU counters, and it provides event
 	  capabilities on top of those.
 
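As a user-space illustration of the system call this help text mentions, the sketch below counts the instructions executed by the calling thread. It is a minimal example only: error handling is elided, and it assumes post-rename headers (<linux/perf_event.h> and __NR_perf_event_open) are installed.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

/* Thin wrapper; glibc does not provide one for this syscall. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* start frozen, enable explicitly below */
	attr.exclude_kernel = 1;	/* count user space only */

	fd = perf_event_open(&attr, 0 /* this task */, -1 /* any CPU */, -1, 0);

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... the workload to be measured goes here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("instructions: %llu\n", (unsigned long long)count);
	return 0;
}

The same attr structure, with different type/config values, selects software, cache or tracepoint events.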
@@ -950,14 +955,26 @@ config EVENT_PROFILE
 	depends on PERF_EVENTS && EVENT_TRACING
 	default y
 	help
-	  Allow the use of tracepoints as software performance counters.
+	  Allow the use of tracepoints as software performance events.
 
-	  When this is enabled, you can create perf counters based on
+	  When this is enabled, you can create perf events based on
 	  tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
 	  found in debugfs://tracing/events/*/*/id. (The -e/--events
 	  option to the perf tool can parse and interpret symbolic
 	  tracepoints, in the subsystem:tracepoint_name format.)
 
+config PERF_COUNTERS
+	bool "Kernel performance counters (old config option)"
+	depends on HAVE_PERF_EVENTS
+	help
+	  This config has been obsoleted by the PERF_EVENTS
+	  config option - please see that one for details.
+
+	  It has no effect on the kernel whether you enable
+	  it or not, it is a compatibility placeholder.
+
+	  Say N if unsure.
+
 endmenu
 
 config VM_EVENT_COUNTERS
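To make the EVENT_PROFILE help text above concrete: a tracepoint becomes a perf event by reading its numeric ID from debugfs and passing that ID as the config value of a PERF_TYPE_TRACEPOINT attribute. The helper below is an illustrative sketch only (the open_tracepoint_event() name is made up, and it assumes debugfs is mounted at /sys/kernel/debug):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Open a counting perf event on subsystem:tracepoint_name, using the
 * same debugfs ID lookup the help text describes.                    */
static int open_tracepoint_event(const char *subsys, const char *name)
{
	struct perf_event_attr attr;
	char path[256];
	long long id;
	FILE *f;

	/* e.g. /sys/kernel/debug/tracing/events/sched/sched_switch/id */
	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/%s/%s/id", subsys, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &id) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;	/* the tracepoint ID selects the event */

	/* Count events for the calling thread on any CPU. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

This is the same lookup the perf tool performs when given -e subsystem:tracepoint_name.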
 /*
- * Performance event core code
+ * Performance events core code:
  *
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar