Commit 4fb0d2ea authored by Will Deacon

Merge branches 'hwbreak', 'perf/updates' and 'perf/system-pmus' into for-rmk

parents d1244336 7325eaec
@@ -13,7 +13,12 @@
 #define __ARM_PMU_H__

 #include <linux/interrupt.h>
+#include <linux/perf_event.h>

+/*
+ * Types of PMUs that can be accessed directly and require mutual
+ * exclusion between profiling tools.
+ */
 enum arm_pmu_type {
	ARM_PMU_DEVICE_CPU	= 0,
	ARM_NUM_PMU_DEVICES,
@@ -37,21 +42,17 @@ struct arm_pmu_platdata {
  * reserve_pmu() - reserve the hardware performance counters
  *
  * Reserve the hardware performance counters in the system for exclusive use.
- * The platform_device for the system is returned on success, ERR_PTR()
- * encoded error on failure.
+ * Returns 0 on success or -EBUSY if the lock is already held.
  */
-extern struct platform_device *
+extern int
 reserve_pmu(enum arm_pmu_type type);

 /**
  * release_pmu() - Relinquish control of the performance counters
  *
  * Release the performance counters and allow someone else to use them.
- * Callers must have disabled the counters and released IRQs before calling
- * this. The platform_device returned from reserve_pmu() must be passed as
- * a cookie.
  */
-extern int
+extern void
 release_pmu(enum arm_pmu_type type);

 /**
@@ -68,24 +69,78 @@ init_pmu(enum arm_pmu_type type);

 #include <linux/err.h>

-static inline struct platform_device *
-reserve_pmu(enum arm_pmu_type type)
-{
-	return ERR_PTR(-ENODEV);
-}
-
 static inline int
-release_pmu(enum arm_pmu_type type)
+reserve_pmu(enum arm_pmu_type type)
 {
	return -ENODEV;
 }

-static inline int
-init_pmu(enum arm_pmu_type type)
-{
-	return -ENODEV;
-}
+static inline void
+release_pmu(enum arm_pmu_type type)	{ }

 #endif /* CONFIG_CPU_HAS_PMU */
+#ifdef CONFIG_HW_PERF_EVENTS
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+	/*
+	 * The events that are active on the PMU for the given index.
+	 */
+	struct perf_event	**events;
+
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	unsigned long		*used_mask;
+
+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t		pmu_lock;
+};
+
+struct arm_pmu {
+	struct pmu		pmu;
+	enum arm_perf_pmu_ids	id;
+	enum arm_pmu_type	type;
+	cpumask_t		active_irqs;
+	const char		*name;
+	irqreturn_t		(*handle_irq)(int irq_num, void *dev);
+	void			(*enable)(struct hw_perf_event *evt, int idx);
+	void			(*disable)(struct hw_perf_event *evt, int idx);
+	int			(*get_event_idx)(struct pmu_hw_events *hw_events,
+						 struct hw_perf_event *hwc);
+	int			(*set_event_filter)(struct hw_perf_event *evt,
+						    struct perf_event_attr *attr);
+	u32			(*read_counter)(int idx);
+	void			(*write_counter)(int idx, u32 val);
+	void			(*start)(void);
+	void			(*stop)(void);
+	void			(*reset)(void *);
+	int			(*map_event)(struct perf_event *event);
+	int			num_events;
+	atomic_t		active_events;
+	struct mutex		reserve_mutex;
+	u64			max_period;
+	struct platform_device	*plat_device;
+	struct pmu_hw_events	*(*get_hw_events)(void);
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+
+u64 armpmu_event_update(struct perf_event *event,
+			struct hw_perf_event *hwc,
+			int idx, int overflow);
+
+int armpmu_event_set_period(struct perf_event *event,
+			    struct hw_perf_event *hwc,
+			    int idx);
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
 #endif /* __ARM_PMU_H__ */
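
Backends built on this reworked interface fetch their per-PMU register-set state through get_hw_events() and take its pmu_lock around PMU register read/modify/write sequences, as the per-CPU backends below do. A minimal sketch of that pattern, assuming the cpu_pmu pointer used by the common ARM perf code (example_enable and the register access are illustrative placeholders, not part of this commit):

static void example_enable(struct hw_perf_event *evt, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/* Serialize the read/modify/write of the counter-enable state. */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* ... set the enable bit for counter 'idx' here ... */
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
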
[diff collapsed; contents not shown]
@@ -54,7 +54,7 @@ enum armv6_perf_types {
 };

 enum armv6_counters {
-	ARMV6_CYCLE_COUNTER = 1,
+	ARMV6_CYCLE_COUNTER = 0,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
 };
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
		      int idx)
 {
	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= 0;
@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int counter_is_active(unsigned long pmcr, int idx)
+{
+	unsigned long mask = 0;
+
+	if (idx == ARMV6_CYCLE_COUNTER)
+		mask = ARMV6_PMCR_CCOUNT_IEN;
+	else if (idx == ARMV6_COUNTER0)
+		mask = ARMV6_PMCR_COUNT0_IEN;
+	else if (idx == ARMV6_COUNTER1)
+		mask = ARMV6_PMCR_COUNT1_IEN;
+
+	if (mask)
+		return pmcr & mask;
+
+	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+	return 0;
 }

 static irqreturn_t
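
The counter_is_active() helper added above replaces the old per-cpu active_mask bookkeeping: whether a counter is in use is now derived from the interrupt-enable bits of the PMCR value the IRQ handler has already read. Combined with the zero-based counter numbering, the handler's scan becomes a plain bounded loop. An illustrative fragment (example_scan is a placeholder wrapper, not code from this commit):

static void example_scan(unsigned long pmcr, struct pmu_hw_events *cpuc)
{
	int idx;

	/* Counters start at zero, so iterate 0 <= idx < num_events. */
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		if (!counter_is_active(pmcr, idx))
			continue;
		/* ... check for overflow and service cpuc->events[idx] ... */
	}
}
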
@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
 {
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
+		if (!counter_is_active(pmcr, idx))
			continue;

		/*
@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
			continue;

		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
	}

	/*
@@ -527,28 +545,30 @@ static void
 armv6pmu_start(void)
 {
	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 armv6pmu_stop(void)
 {
	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
		       struct hw_perf_event *event)
 {
	/* Always place a cycle counter into the cycle counter. */
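
With used_mask carried in struct pmu_hw_events, an index allocator of the shape being retyped above reduces to an atomic bitmap probe. A hedged sketch of the general technique (example_get_event_idx is illustrative, not this file's ARMv6 implementation, which special-cases the cycle counter):

static int example_get_event_idx(struct pmu_hw_events *hw_events,
				 struct hw_perf_event *hwc)
{
	int idx;

	/* Claim the first free counter by atomically setting its bit. */
	for (idx = 0; idx < cpu_pmu->num_events; ++idx)
		if (!test_and_set_bit(idx, hw_events->used_mask))
			return idx;

	return -EAGAIN;	/* all counters in use */
}
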
@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
		       int idx)
 {
	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
			      int idx)
 {
	unsigned long val, mask, flags, evt = 0;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int armv6_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6_perf_map,
+				&armv6_perf_cache_map, 0xFF);
 }

-static const struct arm_pmu armv6pmu = {
+static struct arm_pmu armv6pmu = {
	.id			= ARM_PERF_PMU_ID_V6,
	.name			= "v6",
	.handle_irq		= armv6pmu_handle_irq,
@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = {
	.get_event_idx		= armv6pmu_get_event_idx,
	.start			= armv6pmu_start,
	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6_perf_cache_map,
-	.event_map		= &armv6_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6_map_event,
	.num_events		= 3,
	.max_period		= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
	return &armv6pmu;
 }
@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void)
  * disable the interrupt reporting and update the event. When unthrottling we
  * reset the period and enable the interrupt reporting.
  */
-static const struct arm_pmu armv6mpcore_pmu = {
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6mpcore_perf_map,
+				&armv6mpcore_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6mpcore_pmu = {
	.id			= ARM_PERF_PMU_ID_V6MP,
	.name			= "v6mpcore",
	.handle_irq		= armv6pmu_handle_irq,
@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = {
	.get_event_idx		= armv6pmu_get_event_idx,
	.start			= armv6pmu_start,
	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6mpcore_perf_cache_map,
-	.event_map		= &armv6mpcore_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6mpcore_map_event,
	.num_events		= 3,
	.max_period		= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
	return &armv6mpcore_pmu;
 }
 #else
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
	return NULL;
 }

-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
	return NULL;
 }
......
[diff collapsed; contents not shown]
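
The per-PMU .map_event callbacks introduced in this commit (armv6_map_event above, xscale_map_event below) all funnel into a shared map_cpu_event() helper in the common ARM perf code, whose diff is collapsed on this page. Its assumed shape, reconstructed for reference from the call sites (treat the signature as an assumption, not verbatim source):

static int map_cpu_event(struct perf_event *event,
			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
			 const unsigned (*cache_map)
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX],
			 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	/* Dispatch on the generic event type to the right lookup table. */
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
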
@@ -40,7 +40,7 @@ enum xscale_perf_types {
 };

 enum xscale_counters {
-	XSCALE_CYCLE_COUNTER	= 1,
+	XSCALE_CYCLE_COUNTER	= 0,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
	unsigned long pmnc;
	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
			continue;

		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
	}

	irq_work_run();
@@ -284,6 +281,7 @@ static void
 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
		return;
	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
		return;
	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
			struct hw_perf_event *event)
 {
	if (XSCALE_PERFCTR_CCNT == event->config_base) {
@@ -368,24 +367,26 @@ static void
 xscale1pmu_start(void)
 {
	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 xscale1pmu_stop(void)
 {
	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static inline u32
@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
	}
 }

-static const struct arm_pmu xscale1pmu = {
+static int xscale_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &xscale_perf_map,
+				&xscale_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu xscale1pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE1,
	.name		= "xscale1",
	.handle_irq	= xscale1pmu_handle_irq,
@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
	.get_event_idx	= xscale1pmu_get_event_idx,
	.start		= xscale1pmu_start,
	.stop		= xscale1pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
	return &xscale1pmu;
 }
@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
			continue;

		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
	}

	irq_work_run();
@@ -616,6 +618,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();
@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
		return;
	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();
@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
		return;
	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
			struct hw_perf_event *event)
 {
	int idx = xscale1pmu_get_event_idx(cpuc, event);
@@ -718,24 +722,26 @@ static void
 xscale2pmu_start(void)
 {
	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 xscale2pmu_stop(void)
 {
	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static inline u32
@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
	}
 }

-static const struct arm_pmu xscale2pmu = {
+static struct arm_pmu xscale2pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE2,
	.name		= "xscale2",
	.handle_irq	= xscale2pmu_handle_irq,
@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
	.get_event_idx	= xscale2pmu_get_event_idx,
	.start		= xscale2pmu_start,
	.stop		= xscale2pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
	.num_events	= 5,
	.max_period	= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
	return &xscale2pmu;
 }
 #else
-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
	return NULL;
 }

-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
	return NULL;
 }
......
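
Both XScale handlers (like the ARMv6 one) elide the same overflow-servicing sequence in the context above; it is built from the armpmu_event_update()/armpmu_event_set_period() helpers declared in asm/pmu.h. A sketch of that sequence, reconstructed from the visible call sites (the wrapper function is illustrative, not verbatim source):

static void example_service_overflow(struct perf_event *event, int idx,
				     struct perf_sample_data *data,
				     struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Fold the hardware count into the generic event. */
	armpmu_event_update(event, hwc, idx, 1);
	data->period = event->hw.last_period;

	/* Re-arm the counter; skip the sample if no period elapsed. */
	if (!armpmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		cpu_pmu->disable(hwc, idx);	/* event was throttled */
}
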
@@ -10,192 +10,26 @@
  *
  */

-#define pr_fmt(fmt) "PMU: " fmt
-
-#include <linux/cpumask.h>
 #include <linux/err.h>
-#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>

 #include <asm/pmu.h>

-static volatile long pmu_lock;
-
-static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
-
-static int __devinit pmu_register(struct platform_device *pdev,
-					enum arm_pmu_type type)
-{
-	if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
-		pr_warning("received registration request for unknown "
-				"PMU device type %d\n", type);
-		return -EINVAL;
-	}
-
-	if (pmu_devices[type]) {
-		pr_warning("rejecting duplicate registration of PMU device "
-			   "type %d.", type);
-		return -ENOSPC;
-	}
-
-	pr_info("registered new PMU device of type %d\n", type);
-	pmu_devices[type] = pdev;
-	return 0;
-}
-
-#define OF_MATCH_PMU(_name, _type) {	\
-	.compatible = _name,		\
-	.data = (void *)_type,		\
-}
-
-#define OF_MATCH_CPU(name)	OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
-
-static struct of_device_id armpmu_of_device_ids[] = {
-	OF_MATCH_CPU("arm,cortex-a9-pmu"),
-	OF_MATCH_CPU("arm,cortex-a8-pmu"),
-	OF_MATCH_CPU("arm,arm1136-pmu"),
-	OF_MATCH_CPU("arm,arm1176-pmu"),
-	{},
-};
-
-#define PLAT_MATCH_PMU(_name, _type) {	\
-	.name		= _name,	\
-	.driver_data	= _type,	\
-}
-
-#define PLAT_MATCH_CPU(_name)	PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
-
-static struct platform_device_id armpmu_plat_device_ids[] = {
-	PLAT_MATCH_CPU("arm-pmu"),
-	{},
-};
-
-enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
-{
-	const struct of_device_id	*of_id;
-	const struct platform_device_id *pdev_id;
-
-	/* provided by of_device_id table */
-	if (pdev->dev.of_node) {
-		of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
-		BUG_ON(!of_id);
-		return (enum arm_pmu_type)of_id->data;
-	}
-
-	/* Provided by platform_device_id table */
-	pdev_id = platform_get_device_id(pdev);
-	BUG_ON(!pdev_id);
-	return pdev_id->driver_data;
-}
-
-static int __devinit armpmu_device_probe(struct platform_device *pdev)
-{
-	return pmu_register(pdev, armpmu_device_type(pdev));
-}
-
-static struct platform_driver armpmu_driver = {
-	.driver		= {
-		.name	= "arm-pmu",
-		.of_match_table = armpmu_of_device_ids,
-	},
-	.probe		= armpmu_device_probe,
-	.id_table	= armpmu_plat_device_ids,
-};
-
-static int __init register_pmu_driver(void)
-{
-	return platform_driver_register(&armpmu_driver);
-}
-device_initcall(register_pmu_driver);
+/*
+ * PMU locking to ensure mutual exclusion between different subsystems.
+ */
+static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];

-struct platform_device *
+int
 reserve_pmu(enum arm_pmu_type type)
 {
-	struct platform_device *pdev;
-
-	if (test_and_set_bit_lock(type, &pmu_lock)) {
-		pdev = ERR_PTR(-EBUSY);
-	} else if (pmu_devices[type] == NULL) {
-		clear_bit_unlock(type, &pmu_lock);
-		pdev = ERR_PTR(-ENODEV);
-	} else {
-		pdev = pmu_devices[type];
-	}
-
-	return pdev;
+	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
 }
 EXPORT_SYMBOL_GPL(reserve_pmu);

-int
+void
 release_pmu(enum arm_pmu_type type)
 {
-	if (WARN_ON(!pmu_devices[type]))
-		return -EINVAL;
-	clear_bit_unlock(type, &pmu_lock);
-	return 0;
+	clear_bit_unlock(type, pmu_lock);
 }
 EXPORT_SYMBOL_GPL(release_pmu);
-
-static int
-set_irq_affinity(int irq,
-		 unsigned int cpu)
-{
-#ifdef CONFIG_SMP
-	int err = irq_set_affinity(irq, cpumask_of(cpu));
-	if (err)
-		pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-			   irq, cpu);
-	return err;
-#else
-	return -EINVAL;
-#endif
-}
-
-static int
-init_cpu_pmu(void)
-{
-	int i, irqs, err = 0;
-	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
-
-	if (!pdev)
-		return -ENODEV;
-
-	irqs = pdev->num_resources;
-
-	/*
-	 * If we have a single PMU interrupt that we can't shift, assume that
-	 * we're running on a uniprocessor machine and continue.
-	 */
-	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
-		return 0;
-
-	for (i = 0; i < irqs; ++i) {
-		err = set_irq_affinity(platform_get_irq(pdev, i), i);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-int
-init_pmu(enum arm_pmu_type type)
-{
-	int err = 0;
-
-	switch (type) {
-	case ARM_PMU_DEVICE_CPU:
-		err = init_cpu_pmu();
-		break;
-	default:
-		pr_warning("attempt to initialise PMU of unknown "
-			   "type %d\n", type);
-		err = -EINVAL;
-	}
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(init_pmu);
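
After this change, reserve_pmu()/release_pmu() are a bare try-lock on a per-type bitmap; the platform_device and IRQ handling move out of pmu.c and into the perf code (see the plat_device field of struct arm_pmu). A usage sketch under the new API (example_claim_cpu_pmu is an illustrative caller, not part of this commit):

static int example_claim_cpu_pmu(void)
{
	int err = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (err)
		return err;	/* -EBUSY: another subsystem owns the PMU */

	/* ... program and use the counters ... */

	release_pmu(ARM_PMU_DEVICE_CPU);
	return 0;
}
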
@@ -5715,6 +5715,7 @@ struct pmu *perf_init_event(struct perf_event *event)
	pmu = idr_find(&pmu_idr, event->attr.type);
	rcu_read_unlock();
	if (pmu) {
+		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (ret)
			pmu = ERR_PTR(ret);
@@ -5722,6 +5723,7 @@ struct pmu *perf_init_event(struct perf_event *event)
	}

	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (!ret)
			goto unlock;
@@ -5848,8 +5850,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
		return ERR_PTR(err);
	}

-	event->pmu = pmu;
-
	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_inc(&perf_sched_events);
......
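
Assigning event->pmu before ->event_init() runs (rather than afterwards in perf_event_alloc()) lets an init callback map from the generic pmu back to its containing driver structure, which is exactly what the ARM code's to_arm_pmu() macro needs. A sketch under that assumption (the body is illustrative, not the actual ARM event_init):

static int example_event_init(struct perf_event *event)
{
	/* Valid because perf_init_event() now assigns event->pmu first. */
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	return armpmu->map_event(event) < 0 ? -ENOENT : 0;
}
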