Commit d8471ad3 authored by Robert Richter

oprofile: Introduce op_x86_phys_to_virt()

This new function translates physical to virtual counter numbers.
Signed-off-by: Robert Richter <robert.richter@amd.com>
parent a5659d17
...@@ -27,12 +27,6 @@ ...@@ -27,12 +27,6 @@
#include "op_counter.h" #include "op_counter.h"
#include "op_x86_model.h" #include "op_x86_model.h"
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
DEFINE_PER_CPU(int, switch_index);
#endif
static struct op_x86_model_spec const *model; static struct op_x86_model_spec const *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc); static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
...@@ -103,6 +97,21 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs) ...@@ -103,6 +97,21 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
} }
} }
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
/* Per-CPU index of the counter set currently selected by multiplexing. */
static DEFINE_PER_CPU(int, switch_index);
/*
 * op_x86_phys_to_virt - translate a physical counter number into the
 * corresponding virtual counter number.
 *
 * With event multiplexing, each physical counter is time-shared between
 * several configured (virtual) events; the per-CPU switch_index is the
 * offset of the set active on this CPU, so virt = switch_index + phys.
 * Non-static on purpose: declared extern in op_x86_model.h for use by
 * the model-specific drivers (e.g. op_model_amd.c).
 */
inline int op_x86_phys_to_virt(int phys)
{
return __get_cpu_var(switch_index) + phys;
}
#else
/* Without multiplexing, virtual and physical counter numbers coincide. */
inline int op_x86_phys_to_virt(int phys) { return phys; }
#endif
static void free_msrs(void) static void free_msrs(void)
{ {
int i; int i;
...@@ -248,31 +257,25 @@ static int nmi_setup(void) ...@@ -248,31 +257,25 @@ static int nmi_setup(void)
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{ {
unsigned int si = __get_cpu_var(switch_index);
struct op_msr *multiplex = msrs->multiplex; struct op_msr *multiplex = msrs->multiplex;
unsigned int i; int i;
for (i = 0; i < model->num_counters; ++i) { for (i = 0; i < model->num_counters; ++i) {
int offset = i + si; int virt = op_x86_phys_to_virt(i);
if (multiplex[offset].addr) { if (multiplex[virt].addr)
rdmsrl(multiplex[offset].addr, rdmsrl(multiplex[virt].addr, multiplex[virt].saved);
multiplex[offset].saved);
}
} }
} }
static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{ {
unsigned int si = __get_cpu_var(switch_index);
struct op_msr *multiplex = msrs->multiplex; struct op_msr *multiplex = msrs->multiplex;
unsigned int i; int i;
for (i = 0; i < model->num_counters; ++i) { for (i = 0; i < model->num_counters; ++i) {
int offset = i + si; int virt = op_x86_phys_to_virt(i);
if (multiplex[offset].addr) { if (multiplex[virt].addr)
wrmsrl(multiplex[offset].addr, wrmsrl(multiplex[virt].addr, multiplex[virt].saved);
multiplex[offset].saved);
}
} }
} }
......
...@@ -42,9 +42,6 @@ ...@@ -42,9 +42,6 @@
#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21)) #define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
static unsigned long reset_value[NUM_VIRT_COUNTERS]; static unsigned long reset_value[NUM_VIRT_COUNTERS];
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
DECLARE_PER_CPU(int, switch_index);
#endif
#ifdef CONFIG_OPROFILE_IBS #ifdef CONFIG_OPROFILE_IBS
...@@ -141,22 +138,21 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, ...@@ -141,22 +138,21 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
/* enable active counters */ /* enable active counters */
for (i = 0; i < NUM_COUNTERS; ++i) { for (i = 0; i < NUM_COUNTERS; ++i) {
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX int virt = op_x86_phys_to_virt(i);
int offset = i + __get_cpu_var(switch_index); if (!counter_config[virt].enabled)
#else continue;
int offset = i; if (!msrs->counters[i].addr)
#endif continue;
if (counter_config[offset].enabled && msrs->counters[i].addr) {
/* setup counter registers */ /* setup counter registers */
wrmsrl(msrs->counters[i].addr, -(u64)reset_value[offset]); wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
/* setup control registers */ /* setup control registers */
rdmsrl(msrs->controls[i].addr, val); rdmsrl(msrs->controls[i].addr, val);
val &= model->reserved; val &= model->reserved;
val |= op_x86_get_ctrl(model, &counter_config[offset]); val |= op_x86_get_ctrl(model, &counter_config[virt]);
wrmsrl(msrs->controls[i].addr, val); wrmsrl(msrs->controls[i].addr, val);
} }
}
} }
...@@ -170,15 +166,14 @@ static void op_amd_switch_ctrl(struct op_x86_model_spec const *model, ...@@ -170,15 +166,14 @@ static void op_amd_switch_ctrl(struct op_x86_model_spec const *model,
/* enable active counters */ /* enable active counters */
for (i = 0; i < NUM_COUNTERS; ++i) { for (i = 0; i < NUM_COUNTERS; ++i) {
int offset = i + __get_cpu_var(switch_index); int virt = op_x86_phys_to_virt(i);
if (counter_config[offset].enabled) { if (!counter_config[virt].enabled)
/* setup control registers */ continue;
rdmsrl(msrs->controls[i].addr, val); rdmsrl(msrs->controls[i].addr, val);
val &= model->reserved; val &= model->reserved;
val |= op_x86_get_ctrl(model, &counter_config[offset]); val |= op_x86_get_ctrl(model, &counter_config[virt]);
wrmsrl(msrs->controls[i].addr, val); wrmsrl(msrs->controls[i].addr, val);
} }
}
} }
#endif #endif
...@@ -292,19 +287,15 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, ...@@ -292,19 +287,15 @@ static int op_amd_check_ctrs(struct pt_regs * const regs,
int i; int i;
for (i = 0; i < NUM_COUNTERS; ++i) { for (i = 0; i < NUM_COUNTERS; ++i) {
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX int virt = op_x86_phys_to_virt(i);
int offset = i + __get_cpu_var(switch_index); if (!reset_value[virt])
#else
int offset = i;
#endif
if (!reset_value[offset])
continue; continue;
rdmsrl(msrs->counters[i].addr, val); rdmsrl(msrs->counters[i].addr, val);
/* bit is clear if overflowed: */ /* bit is clear if overflowed: */
if (val & OP_CTR_OVERFLOW) if (val & OP_CTR_OVERFLOW)
continue; continue;
oprofile_add_sample(regs, offset); oprofile_add_sample(regs, virt);
wrmsrl(msrs->counters[i].addr, -(u64)reset_value[offset]); wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
} }
op_amd_handle_ibs(regs, msrs); op_amd_handle_ibs(regs, msrs);
...@@ -319,17 +310,12 @@ static void op_amd_start(struct op_msrs const * const msrs) ...@@ -319,17 +310,12 @@ static void op_amd_start(struct op_msrs const * const msrs)
int i; int i;
for (i = 0; i < NUM_COUNTERS; ++i) { for (i = 0; i < NUM_COUNTERS; ++i) {
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX if (!reset_value[op_x86_phys_to_virt(i)])
int offset = i + __get_cpu_var(switch_index); continue;
#else
int offset = i;
#endif
if (reset_value[offset]) {
rdmsrl(msrs->controls[i].addr, val); rdmsrl(msrs->controls[i].addr, val);
val |= ARCH_PERFMON_EVENTSEL0_ENABLE; val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
wrmsrl(msrs->controls[i].addr, val); wrmsrl(msrs->controls[i].addr, val);
} }
}
op_amd_start_ibs(); op_amd_start_ibs();
} }
...@@ -344,11 +330,7 @@ static void op_amd_stop(struct op_msrs const * const msrs) ...@@ -344,11 +330,7 @@ static void op_amd_stop(struct op_msrs const * const msrs)
* pm callback * pm callback
*/ */
for (i = 0; i < NUM_COUNTERS; ++i) { for (i = 0; i < NUM_COUNTERS; ++i) {
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX if (!reset_value[op_x86_phys_to_virt(i)])
if (!reset_value[i + per_cpu(switch_index, smp_processor_id())])
#else
if (!reset_value[i])
#endif
continue; continue;
rdmsrl(msrs->controls[i].addr, val); rdmsrl(msrs->controls[i].addr, val);
val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
......
...@@ -60,6 +60,7 @@ struct op_counter_config; ...@@ -60,6 +60,7 @@ struct op_counter_config;
extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
struct op_counter_config *counter_config); struct op_counter_config *counter_config);
extern int op_x86_phys_to_virt(int phys);
extern struct op_x86_model_spec const op_ppro_spec; extern struct op_x86_model_spec const op_ppro_spec;
extern struct op_x86_model_spec const op_p4_spec; extern struct op_x86_model_spec const op_p4_spec;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment