Commit 8a6d2f8f authored by Thomas Gleixner, committed by Ingo Molnar

perf/x86/intel/rapl: Utilize event->pmu_private

Store the PMU pointer in event->pmu_private and use it instead of the per CPU
data. Preparatory step to get rid of the per CPU allocations. The usage sites
are the perf fast path, so we keep that even after the conversion to per
package storage as a CPU to package lookup involves 3 loads versus 1 with the
pmu_private pointer.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Harish Chegondi <harish.chegondi@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20160222221012.748151799@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a208749c
...@@ -122,6 +122,7 @@ static struct perf_pmu_events_attr event_attr_##v = { \ ...@@ -122,6 +122,7 @@ static struct perf_pmu_events_attr event_attr_##v = { \
struct rapl_pmu { struct rapl_pmu {
raw_spinlock_t lock; raw_spinlock_t lock;
int n_active; int n_active;
int cpu;
struct list_head active_list; struct list_head active_list;
struct pmu *pmu; struct pmu *pmu;
ktime_t timer_interval; ktime_t timer_interval;
...@@ -203,7 +204,7 @@ static void rapl_start_hrtimer(struct rapl_pmu *pmu) ...@@ -203,7 +204,7 @@ static void rapl_start_hrtimer(struct rapl_pmu *pmu)
static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{ {
struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
struct perf_event *event; struct perf_event *event;
unsigned long flags; unsigned long flags;
...@@ -249,7 +250,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu, ...@@ -249,7 +250,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
static void rapl_pmu_event_start(struct perf_event *event, int mode) static void rapl_pmu_event_start(struct perf_event *event, int mode)
{ {
struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); struct rapl_pmu *pmu = event->pmu_private;
unsigned long flags; unsigned long flags;
raw_spin_lock_irqsave(&pmu->lock, flags); raw_spin_lock_irqsave(&pmu->lock, flags);
...@@ -259,7 +260,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode) ...@@ -259,7 +260,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
static void rapl_pmu_event_stop(struct perf_event *event, int mode) static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{ {
struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); struct rapl_pmu *pmu = event->pmu_private;
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc = &event->hw;
unsigned long flags; unsigned long flags;
...@@ -293,7 +294,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode) ...@@ -293,7 +294,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
static int rapl_pmu_event_add(struct perf_event *event, int mode) static int rapl_pmu_event_add(struct perf_event *event, int mode)
{ {
struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); struct rapl_pmu *pmu = event->pmu_private;
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc = &event->hw;
unsigned long flags; unsigned long flags;
...@@ -316,6 +317,7 @@ static void rapl_pmu_event_del(struct perf_event *event, int flags) ...@@ -316,6 +317,7 @@ static void rapl_pmu_event_del(struct perf_event *event, int flags)
static int rapl_pmu_event_init(struct perf_event *event) static int rapl_pmu_event_init(struct perf_event *event)
{ {
struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
u64 cfg = event->attr.config & RAPL_EVENT_MASK; u64 cfg = event->attr.config & RAPL_EVENT_MASK;
int bit, msr, ret = 0; int bit, msr, ret = 0;
...@@ -327,6 +329,9 @@ static int rapl_pmu_event_init(struct perf_event *event) ...@@ -327,6 +329,9 @@ static int rapl_pmu_event_init(struct perf_event *event)
if (event->attr.config & ~RAPL_EVENT_MASK) if (event->attr.config & ~RAPL_EVENT_MASK)
return -EINVAL; return -EINVAL;
if (event->cpu < 0)
return -EINVAL;
/* /*
* check event is known (determines counter) * check event is known (determines counter)
*/ */
...@@ -365,6 +370,8 @@ static int rapl_pmu_event_init(struct perf_event *event) ...@@ -365,6 +370,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
return -EINVAL; return -EINVAL;
/* must be done before validate_group */ /* must be done before validate_group */
event->cpu = pmu->cpu;
event->pmu_private = pmu;
event->hw.event_base = msr; event->hw.event_base = msr;
event->hw.config = cfg; event->hw.config = cfg;
event->hw.idx = bit; event->hw.idx = bit;
...@@ -572,6 +579,7 @@ static int rapl_cpu_prepare(int cpu) ...@@ -572,6 +579,7 @@ static int rapl_cpu_prepare(int cpu)
INIT_LIST_HEAD(&pmu->active_list); INIT_LIST_HEAD(&pmu->active_list);
pmu->pmu = &rapl_pmu_class; pmu->pmu = &rapl_pmu_class;
pmu->cpu = cpu;
pmu->timer_interval = ms_to_ktime(rapl_timer_ms); pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.