Commit bf926731 authored by Thomas Gleixner, committed by Ingo Molnar

perf/x86/intel/cqm: Add storage for 'closid' and clean up 'struct intel_pqr_state'

'closid' (CLass Of Service ID) is used for the Class based Cache
Allocation Technology (CAT). Add explicit storage to the per cpu cache
for it, so it can be used later with the CAT support (requires to move
the per cpu data).

While at it:

 - Rename the structure to intel_pqr_state which reflects the actual
   purpose of the struct: cache values which go into the PQR MSR

 - Rename 'cnt' to rmid_usecnt which reflects the actual purpose of
   the counter.

 - Document the structure and the struct members.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Matt Fleming <matt.fleming@intel.com>
Cc: Kanaka Juvva <kanaka.d.juvva@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Cc: Will Auld <will.auld@intel.com>
Link: http://lkml.kernel.org/r/20150518235150.240899319@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 43d0c2f6
...@@ -16,18 +16,32 @@ ...@@ -16,18 +16,32 @@
static unsigned int cqm_max_rmid = -1; static unsigned int cqm_max_rmid = -1;
static unsigned int cqm_l3_scale; /* supposedly cacheline size */ static unsigned int cqm_l3_scale; /* supposedly cacheline size */
struct intel_cqm_state { /**
* struct intel_pqr_state - State cache for the PQR MSR
* @rmid: The cached Resource Monitoring ID
* @closid: The cached Class Of Service ID
* @rmid_usecnt: The usage counter for rmid
*
* The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
* lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
* contains both parts, so we need to cache them.
*
* The cache also helps to avoid pointless updates if the value does
* not change.
*/
struct intel_pqr_state {
u32 rmid; u32 rmid;
int cnt; u32 closid;
int rmid_usecnt;
}; };
/* /*
* The cached intel_cqm_state is strictly per CPU and can never be * The cached intel_pqr_state is strictly per CPU and can never be
* updated from a remote CPU. Both functions which modify the state * updated from a remote CPU. Both functions which modify the state
* (intel_cqm_event_start and intel_cqm_event_stop) are called with * (intel_cqm_event_start and intel_cqm_event_stop) are called with
* interrupts disabled, which is sufficient for the protection. * interrupts disabled, which is sufficient for the protection.
*/ */
static DEFINE_PER_CPU(struct intel_cqm_state, cqm_state); static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
/* /*
* Protects cache_cgroups and cqm_rmid_free_lru and cqm_rmid_limbo_lru. * Protects cache_cgroups and cqm_rmid_free_lru and cqm_rmid_limbo_lru.
...@@ -966,7 +980,7 @@ static u64 intel_cqm_event_count(struct perf_event *event) ...@@ -966,7 +980,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
static void intel_cqm_event_start(struct perf_event *event, int mode) static void intel_cqm_event_start(struct perf_event *event, int mode)
{ {
struct intel_cqm_state *state = this_cpu_ptr(&cqm_state); struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
u32 rmid = event->hw.cqm_rmid; u32 rmid = event->hw.cqm_rmid;
if (!(event->hw.cqm_state & PERF_HES_STOPPED)) if (!(event->hw.cqm_state & PERF_HES_STOPPED))
...@@ -974,7 +988,7 @@ static void intel_cqm_event_start(struct perf_event *event, int mode) ...@@ -974,7 +988,7 @@ static void intel_cqm_event_start(struct perf_event *event, int mode)
event->hw.cqm_state &= ~PERF_HES_STOPPED; event->hw.cqm_state &= ~PERF_HES_STOPPED;
if (state->cnt++) { if (state->rmid_usecnt++) {
if (!WARN_ON_ONCE(state->rmid != rmid)) if (!WARN_ON_ONCE(state->rmid != rmid))
return; return;
} else { } else {
...@@ -982,17 +996,12 @@ static void intel_cqm_event_start(struct perf_event *event, int mode) ...@@ -982,17 +996,12 @@ static void intel_cqm_event_start(struct perf_event *event, int mode)
} }
state->rmid = rmid; state->rmid = rmid;
/* wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid);
* This is actually wrong, as the upper 32 bit MSR contain the
* closid which is used for configuring the Cache Allocation
* Technology component.
*/
wrmsr(MSR_IA32_PQR_ASSOC, rmid, 0);
} }
static void intel_cqm_event_stop(struct perf_event *event, int mode) static void intel_cqm_event_stop(struct perf_event *event, int mode)
{ {
struct intel_cqm_state *state = this_cpu_ptr(&cqm_state); struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
if (event->hw.cqm_state & PERF_HES_STOPPED) if (event->hw.cqm_state & PERF_HES_STOPPED)
return; return;
...@@ -1001,15 +1010,9 @@ static void intel_cqm_event_stop(struct perf_event *event, int mode) ...@@ -1001,15 +1010,9 @@ static void intel_cqm_event_stop(struct perf_event *event, int mode)
intel_cqm_event_read(event); intel_cqm_event_read(event);
if (!--state->cnt) { if (!--state->rmid_usecnt) {
state->rmid = 0; state->rmid = 0;
/* wrmsr(MSR_IA32_PQR_ASSOC, 0, state->closid);
* This is actually wrong, as the upper 32 bit of the
* MSR contain the closid which is used for
* configuring the Cache Allocation Technology
* component.
*/
wrmsr(MSR_IA32_PQR_ASSOC, 0, 0);
} else { } else {
WARN_ON_ONCE(!state->rmid); WARN_ON_ONCE(!state->rmid);
} }
...@@ -1247,11 +1250,12 @@ static inline void cqm_pick_event_reader(int cpu) ...@@ -1247,11 +1250,12 @@ static inline void cqm_pick_event_reader(int cpu)
static void intel_cqm_cpu_prepare(unsigned int cpu) static void intel_cqm_cpu_prepare(unsigned int cpu)
{ {
struct intel_cqm_state *state = &per_cpu(cqm_state, cpu); struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu); struct cpuinfo_x86 *c = &cpu_data(cpu);
state->rmid = 0; state->rmid = 0;
state->cnt = 0; state->closid = 0;
state->rmid_usecnt = 0;
WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid); WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale); WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment