Commit a208749c authored by Thomas Gleixner, committed by Ingo Molnar

perf/x86/intel/rapl: Make PMU lock raw

The rapl_pmu lock is taken in hard interrupt context (the hrtimer callback) even on PREEMPT_RT,
where an ordinary spinlock_t becomes a sleeping lock. Make it raw so RT does not have to patch it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Harish Chegondi <harish.chegondi@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20160222221012.669411833@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7162b8fe
@@ -120,7 +120,7 @@ static struct perf_pmu_events_attr event_attr_##v = { \
 };
 
 struct rapl_pmu {
-	spinlock_t	 lock;
+	raw_spinlock_t	 lock;
 	int		 n_active;
 	struct list_head active_list;
 	struct pmu	 *pmu;
@@ -210,12 +210,12 @@ static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 	if (!pmu->n_active)
 		return HRTIMER_NORESTART;
 
-	spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&pmu->lock, flags);
 
 	list_for_each_entry(event, &pmu->active_list, active_entry)
 		rapl_event_update(event);
 
-	spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&pmu->lock, flags);
 
 	hrtimer_forward_now(hrtimer, pmu->timer_interval);
 
@@ -252,9 +252,9 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
 	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&pmu->lock, flags);
 	__rapl_pmu_event_start(pmu, event);
-	spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&pmu->lock, flags);
 }
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
@@ -263,7 +263,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&pmu->lock, flags);
 
 	/* mark event as deactivated and stopped */
 	if (!(hwc->state & PERF_HES_STOPPED)) {
@@ -288,7 +288,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 		hwc->state |= PERF_HES_UPTODATE;
 	}
 
-	spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&pmu->lock, flags);
 }
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
@@ -297,14 +297,14 @@ static int rapl_pmu_event_add(struct perf_event *event, int mode)
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&pmu->lock, flags);
 
 	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
 
 	if (mode & PERF_EF_START)
 		__rapl_pmu_event_start(pmu, event);
 
-	spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&pmu->lock, flags);
 
 	return 0;
 }
@@ -567,7 +567,7 @@ static int rapl_cpu_prepare(int cpu)
 	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
 	if (!pmu)
 		return -1;
-	spin_lock_init(&pmu->lock);
+	raw_spin_lock_init(&pmu->lock);
 
 	INIT_LIST_HEAD(&pmu->active_list);
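
For reference, the resulting locking pattern boiled down to a self-contained sketch. This is not part of the commit; the demo_pmu and demo_hrtimer_handle names are hypothetical. It shows the per-PMU lock declared as raw_spinlock_t and taken with the raw_spin_lock_irqsave() variants, so it stays a spinning lock in the hrtimer callback even when PREEMPT_RT converts ordinary spinlock_t locks into sleeping locks.

/* Minimal sketch, not from the kernel tree: demo_pmu is a made-up example. */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/list.h>

struct demo_pmu {
	raw_spinlock_t	 lock;		/* raw: taken in hard interrupt context */
	int		 n_active;
	struct list_head active_list;
	ktime_t		 timer_interval;
	struct hrtimer	 hrtimer;
};

/* hrtimer callback; per the commit message, for the RAPL PMU this runs in
 * hard interrupt context even on PREEMPT_RT */
static enum hrtimer_restart demo_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct demo_pmu *pmu = container_of(hrtimer, struct demo_pmu, hrtimer);
	unsigned long flags;

	if (!pmu->n_active)
		return HRTIMER_NORESTART;

	/* raw_spin_lock_irqsave() never sleeps, so it is safe here on RT */
	raw_spin_lock_irqsave(&pmu->lock, flags);
	/* ... walk pmu->active_list and update the active events ... */
	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	hrtimer_forward_now(hrtimer, pmu->timer_interval);
	return HRTIMER_RESTART;
}

static void demo_pmu_init(struct demo_pmu *pmu)
{
	raw_spin_lock_init(&pmu->lock);
	INIT_LIST_HEAD(&pmu->active_list);
}

The only functional difference from spinlock_t is that PREEMPT_RT leaves raw_spinlock_t as a real spinning lock instead of substituting an rtmutex-based sleeping lock, which is why the conversion in this commit needs no other changes.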