Commit d0e4120f authored by Robert Richter

oprofile/x86: reserve counter msrs pairwise

AMD's and Intel's P6 generic performance counters come in pairs of
counter and control MSRs. This patch changes the counter reservation
so that both MSRs of a pair must be registered together. It merges
some counter loops and also removes the now-unnecessary NUM_CONTROLS
macro from the AMD implementation.
Signed-off-by: Robert Richter <robert.richter@amd.com>
parent 8f5a2dd8
...@@ -30,13 +30,10 @@ ...@@ -30,13 +30,10 @@
#include "op_counter.h" #include "op_counter.h"
#define NUM_COUNTERS 4 #define NUM_COUNTERS 4
#define NUM_CONTROLS 4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS 32 #define NUM_VIRT_COUNTERS 32
#define NUM_VIRT_CONTROLS 32
#else #else
#define NUM_VIRT_COUNTERS NUM_COUNTERS #define NUM_VIRT_COUNTERS NUM_COUNTERS
#define NUM_VIRT_CONTROLS NUM_CONTROLS
#endif #endif
#define OP_EVENT_MASK 0x0FFF #define OP_EVENT_MASK 0x0FFF
...@@ -134,13 +131,15 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs) ...@@ -134,13 +131,15 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
int i; int i;
for (i = 0; i < NUM_COUNTERS; i++) { for (i = 0; i < NUM_COUNTERS; i++) {
if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; continue;
} if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
for (i = 0; i < NUM_CONTROLS; i++) { continue;
if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) }
msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; /* both registers must be reserved */
msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
} }
} }
...@@ -160,7 +159,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, ...@@ -160,7 +159,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
} }
/* clear all counters */ /* clear all counters */
for (i = 0; i < NUM_CONTROLS; ++i) { for (i = 0; i < NUM_COUNTERS; ++i) {
if (unlikely(!msrs->controls[i].addr)) { if (unlikely(!msrs->controls[i].addr)) {
if (counter_config[i].enabled && !smp_processor_id()) if (counter_config[i].enabled && !smp_processor_id())
/* /*
...@@ -175,12 +174,10 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, ...@@ -175,12 +174,10 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
op_x86_warn_in_use(i); op_x86_warn_in_use(i);
val &= model->reserved; val &= model->reserved;
wrmsrl(msrs->controls[i].addr, val); wrmsrl(msrs->controls[i].addr, val);
} /*
* avoid a false detection of ctr overflows in NMI
/* avoid a false detection of ctr overflows in NMI handler */ * handler
for (i = 0; i < NUM_COUNTERS; ++i) { */
if (unlikely(!msrs->counters[i].addr))
continue;
wrmsrl(msrs->counters[i].addr, -1LL); wrmsrl(msrs->counters[i].addr, -1LL);
} }
...@@ -430,12 +427,10 @@ static void op_amd_shutdown(struct op_msrs const * const msrs) ...@@ -430,12 +427,10 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
int i; int i;
for (i = 0; i < NUM_COUNTERS; ++i) { for (i = 0; i < NUM_COUNTERS; ++i) {
if (msrs->counters[i].addr) if (!msrs->counters[i].addr)
release_perfctr_nmi(MSR_K7_PERFCTR0 + i); continue;
} release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
for (i = 0; i < NUM_CONTROLS; ++i) { release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
if (msrs->controls[i].addr)
release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
} }
} }
...@@ -583,7 +578,7 @@ static void op_amd_exit(void) ...@@ -583,7 +578,7 @@ static void op_amd_exit(void)
struct op_x86_model_spec op_amd_spec = { struct op_x86_model_spec op_amd_spec = {
.num_counters = NUM_COUNTERS, .num_counters = NUM_COUNTERS,
.num_controls = NUM_CONTROLS, .num_controls = NUM_COUNTERS,
.num_virt_counters = NUM_VIRT_COUNTERS, .num_virt_counters = NUM_VIRT_COUNTERS,
.reserved = MSR_AMD_EVENTSEL_RESERVED, .reserved = MSR_AMD_EVENTSEL_RESERVED,
.event_mask = OP_EVENT_MASK, .event_mask = OP_EVENT_MASK,
......
...@@ -35,13 +35,15 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs) ...@@ -35,13 +35,15 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
int i; int i;
for (i = 0; i < num_counters; i++) { for (i = 0; i < num_counters; i++) {
if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; continue;
} if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
for (i = 0; i < num_counters; i++) { continue;
if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) }
msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; /* both registers must be reserved */
msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
} }
} }
...@@ -92,12 +94,10 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, ...@@ -92,12 +94,10 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
op_x86_warn_in_use(i); op_x86_warn_in_use(i);
val &= model->reserved; val &= model->reserved;
wrmsrl(msrs->controls[i].addr, val); wrmsrl(msrs->controls[i].addr, val);
} /*
* avoid a false detection of ctr overflows in NMI *
/* avoid a false detection of ctr overflows in NMI handler */ * handler
for (i = 0; i < num_counters; ++i) { */
if (unlikely(!msrs->counters[i].addr))
continue;
wrmsrl(msrs->counters[i].addr, -1LL); wrmsrl(msrs->counters[i].addr, -1LL);
} }
...@@ -194,12 +194,10 @@ static void ppro_shutdown(struct op_msrs const * const msrs) ...@@ -194,12 +194,10 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
int i; int i;
for (i = 0; i < num_counters; ++i) { for (i = 0; i < num_counters; ++i) {
if (msrs->counters[i].addr) if (!msrs->counters[i].addr)
release_perfctr_nmi(MSR_P6_PERFCTR0 + i); continue;
} release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
for (i = 0; i < num_counters; ++i) { release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
if (msrs->controls[i].addr)
release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
} }
if (reset_value) { if (reset_value) {
kfree(reset_value); kfree(reset_value);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment