Commit d25e8dbd authored by Linus Torvalds


Merge branch 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  oprofile/x86: fix msr access to reserved counters
  oprofile/x86: use kzalloc() instead of kmalloc()
  oprofile/x86: fix perfctr nmi reservation for multiplexing
  oprofile/x86: add comment to counter-in-use warning
  oprofile/x86: warn user if a counter is already active
  oprofile/x86: implement randomization for IBS periodic op counter
  oprofile/x86: implement lfsr pseudo-random number generator for IBS
  oprofile/x86: implement IBS cpuid feature detection
  oprofile/x86: remove node check in AMD IBS initialization
  oprofile/x86: remove OPROFILE_IBS config option
  oprofile: remove EXPERIMENTAL from the config option description
  oprofile: remove tracing build dependency
parents 642c4c75 cfc9c0b4
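
Two of the commits above ("implement randomization for IBS periodic op counter" and "implement lfsr pseudo-random number generator for IBS") add period jitter to IBS sampling. A minimal sketch of a 16-bit LFSR of the kind the series introduces; the tap positions and seed here are illustrative, not necessarily the exact values merged:

	/*
	 * 16-bit linear feedback shift register: a cheap, NMI-safe
	 * pseudo-random source used to jitter the IBS sampling period
	 * so samples do not phase-lock with periodic program behavior.
	 */
	static unsigned int lfsr_random(void)
	{
		static unsigned int lfsr_value = 0xF00D;	/* any non-zero seed */
		unsigned int bit;

		/* XOR the tap bits to compute the bit shifted in. */
		bit = ((lfsr_value >> 0) ^
		       (lfsr_value >> 2) ^
		       (lfsr_value >> 3) ^
		       (lfsr_value >> 5)) & 0x0001;

		lfsr_value = (lfsr_value >> 1) | (bit << 15);
		return lfsr_value;
	}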
@@ -3,11 +3,9 @@
 #
 config OPROFILE
-	tristate "OProfile system profiling (EXPERIMENTAL)"
+	tristate "OProfile system profiling"
 	depends on PROFILING
 	depends on HAVE_OPROFILE
-	depends on TRACING_SUPPORT
-	select TRACING
 	select RING_BUFFER
 	select RING_BUFFER_ALLOW_SWAP
 	help
@@ -17,20 +15,6 @@ config OPROFILE

 	  If unsure, say N.

-config OPROFILE_IBS
-	bool "OProfile AMD IBS support (EXPERIMENTAL)"
-	default n
-	depends on OPROFILE && SMP && X86
-	help
-	  Instruction-Based Sampling (IBS) is a new profiling
-	  technique that provides rich, precise program performance
-	  information. IBS is introduced by AMD Family10h processors
-	  (AMD Opteron Quad-Core processor "Barcelona") to overcome
-	  the limitations of conventional performance counter
-	  sampling.
-
-	  If unsure, say N.
-
 config OPROFILE_EVENT_MULTIPLEX
 	bool "OProfile multiplexing support (EXPERIMENTAL)"
 	default n

...
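
With the OPROFILE_IBS option removed, IBS support is probed at runtime rather than compiled in conditionally. A sketch of cpuid-based detection along the lines of what the series adds; the leaf number follows AMD's CPUID specification, and IBS_CAPS_AVAIL (bit 0) is assumed here to flag a valid capability word:

	#define IBS_CPUID_FEATURES	0x8000001b	/* AMD IBS capability leaf */
	#define IBS_CAPS_AVAIL		(1U << 0)

	static u32 get_ibs_caps(void)
	{
		u32 ibs_caps;

		if (!boot_cpu_has(X86_FEATURE_IBS))
			return 0;

		/*
		 * Early Family 10h parts advertise IBS in CPUID
		 * Fn8000_0001_ECX but do not implement the capability
		 * leaf yet; report plain availability for them.
		 */
		if (cpuid_eax(0x80000000) < IBS_CPUID_FEATURES)
			return IBS_CAPS_AVAIL;

		ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
		if (!(ibs_caps & IBS_CAPS_AVAIL))
			return IBS_CAPS_AVAIL;	/* caps word not valid */

		return ibs_caps;
	}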
@@ -159,7 +159,7 @@ static int nmi_setup_mux(void)
 	for_each_possible_cpu(i) {
 		per_cpu(cpu_msrs, i).multiplex =
-			kmalloc(multiplex_size, GFP_KERNEL);
+			kzalloc(multiplex_size, GFP_KERNEL);
 		if (!per_cpu(cpu_msrs, i).multiplex)
 			return 0;
 	}
@@ -179,7 +179,6 @@ static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
 		if (counter_config[i].enabled) {
 			multiplex[i].saved = -(u64)counter_config[i].count;
 		} else {
-			multiplex[i].addr = 0;
 			multiplex[i].saved = 0;
 		}
 	}
@@ -189,25 +188,27 @@ static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)

 static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
 {
+	struct op_msr *counters = msrs->counters;
 	struct op_msr *multiplex = msrs->multiplex;
 	int i;

 	for (i = 0; i < model->num_counters; ++i) {
 		int virt = op_x86_phys_to_virt(i);
-		if (multiplex[virt].addr)
-			rdmsrl(multiplex[virt].addr, multiplex[virt].saved);
+		if (counters[i].addr)
+			rdmsrl(counters[i].addr, multiplex[virt].saved);
 	}
 }

 static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
 {
+	struct op_msr *counters = msrs->counters;
 	struct op_msr *multiplex = msrs->multiplex;
 	int i;

 	for (i = 0; i < model->num_counters; ++i) {
 		int virt = op_x86_phys_to_virt(i);
-		if (multiplex[virt].addr)
-			wrmsrl(multiplex[virt].addr, multiplex[virt].saved);
+		if (counters[i].addr)
+			wrmsrl(counters[i].addr, multiplex[virt].saved);
 	}
 }
@@ -303,11 +304,11 @@ static int allocate_msrs(void)
 	int i;

 	for_each_possible_cpu(i) {
-		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
+		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
 							GFP_KERNEL);
 		if (!per_cpu(cpu_msrs, i).counters)
 			return 0;
-		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
+		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
 							GFP_KERNEL);
 		if (!per_cpu(cpu_msrs, i).controls)
 			return 0;

...
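
For context on the multiplexing hunks above: with event multiplexing, oprofile schedules more virtual counters than the hardware provides, rotating groups of them onto the physical counters. op_x86_phys_to_virt() maps a physical counter index to the virtual counter it currently backs. A minimal sketch, assuming a per-cpu rotation offset as in nmi_int.c (the switch_index name matches that file, but treat the details as illustrative):

	/* Per-cpu offset: which group of virtual counters is loaded. */
	static DEFINE_PER_CPU(int, switch_index);

	int op_x86_phys_to_virt(int phys)
	{
		return __get_cpu_var(switch_index) + phys;
	}

The save/restore fix thus reads the physical MSR (counters[i].addr) while saving into the virtual slot (multiplex[virt].saved), so the multiplex array no longer needs an .addr field of its own; the kzalloc() conversions in turn guarantee that unreserved counters keep addr == 0 and are skipped.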
(One file's diff is collapsed and not shown here.)
@@ -394,12 +394,6 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
 	setup_num_counters();
 	stag = get_stagger();

-	/* initialize some registers */
-	for (i = 0; i < num_counters; ++i)
-		msrs->counters[i].addr = 0;
-	for (i = 0; i < num_controls; ++i)
-		msrs->controls[i].addr = 0;
-
 	/* the counter & cccr registers we pay attention to */
 	for (i = 0; i < num_counters; ++i) {
 		addr = p4_counters[VIRT_CTR(stag, i)].counter_address;

...
@@ -37,15 +37,11 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 	for (i = 0; i < num_counters; i++) {
 		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
 			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
-		else
-			msrs->counters[i].addr = 0;
 	}

 	for (i = 0; i < num_counters; i++) {
 		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
 			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
-		else
-			msrs->controls[i].addr = 0;
 	}
 }

...
@@ -57,7 +53,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 	int i;

 	if (!reset_value) {
-		reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
+		reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
 					GFP_ATOMIC);
 		if (!reset_value)
 			return;
@@ -82,9 +78,18 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,

 	/* clear all counters */
 	for (i = 0; i < num_counters; ++i) {
-		if (unlikely(!msrs->controls[i].addr))
+		if (unlikely(!msrs->controls[i].addr)) {
+			if (counter_config[i].enabled && !smp_processor_id())
+				/*
+				 * counter is reserved, this is on all
+				 * cpus, so report only for cpu #0
+				 */
+				op_x86_warn_reserved(i);
 			continue;
+		}

 		rdmsrl(msrs->controls[i].addr, val);
+		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+			op_x86_warn_in_use(i);
 		val &= model->reserved;
 		wrmsrl(msrs->controls[i].addr, val);
 	}

...
@@ -57,6 +57,26 @@ struct op_x86_model_spec {

 struct op_counter_config;

+static inline void op_x86_warn_in_use(int counter)
+{
+	/*
+	 * The warning indicates an already running counter. If
+	 * oprofile doesn't collect data, then try using a different
+	 * performance counter on your platform to monitor the desired
+	 * event. Delete counter #%d from the desired event by editing
+	 * the /usr/share/oprofile/%s/<cpu>/events file. If the event
+	 * cannot be monitored by any other counter, contact your
+	 * hardware or BIOS vendor.
+	 */
+	pr_warning("oprofile: counter #%d on cpu #%d may already be used\n",
+		   counter, smp_processor_id());
+}
+
+static inline void op_x86_warn_reserved(int counter)
+{
+	pr_warning("oprofile: counter #%d is already reserved\n", counter);
+}
+
 extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
 			   struct op_counter_config *counter_config);
 extern int op_x86_phys_to_virt(int phys);

...
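
For reference, given the format strings above, a counter reserved elsewhere and a counter found already running would produce log lines like (the counter numbers here are example values):

	oprofile: counter #0 is already reserved
	oprofile: counter #1 on cpu #0 may already be used

The first is reported only from cpu #0, since the reservation is system-wide and one report suffices; the second is per-cpu, since a counter may be busy on only some cpus.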
@@ -1128,7 +1128,7 @@ config MMAP_ALLOW_UNINITIALIZED
 	  See Documentation/nommu-mmap.txt for more information.

 config PROFILING
-	bool "Profiling support (EXPERIMENTAL)"
+	bool "Profiling support"
 	help
 	  Say Y here to enable the extended profiling support mechanisms used
 	  by profilers such as OProfile.