Commit 4fb0d2ea authored by Will Deacon

Merge branches 'hwbreak', 'perf/updates' and 'perf/system-pmus' into for-rmk

parents d1244336 7325eaec
arch/arm/include/asm/pmu.h

@@ -13,7 +13,12 @@
 #define __ARM_PMU_H__
 
 #include <linux/interrupt.h>
+#include <linux/perf_event.h>
 
+/*
+ * Types of PMUs that can be accessed directly and require mutual
+ * exclusion between profiling tools.
+ */
 enum arm_pmu_type {
 	ARM_PMU_DEVICE_CPU	= 0,
 	ARM_NUM_PMU_DEVICES,
@@ -37,21 +42,17 @@ struct arm_pmu_platdata {
  * reserve_pmu() - reserve the hardware performance counters
  *
  * Reserve the hardware performance counters in the system for exclusive use.
- * The platform_device for the system is returned on success, ERR_PTR()
- * encoded error on failure.
+ * Returns 0 on success or -EBUSY if the lock is already held.
  */
-extern struct platform_device *
+extern int
 reserve_pmu(enum arm_pmu_type type);
 
 /**
  * release_pmu() - Relinquish control of the performance counters
  *
  * Release the performance counters and allow someone else to use them.
- * Callers must have disabled the counters and released IRQs before calling
- * this. The platform_device returned from reserve_pmu() must be passed as
- * a cookie.
  */
-extern int
+extern void
 release_pmu(enum arm_pmu_type type);
 
 /**
@@ -68,24 +69,78 @@ init_pmu(enum arm_pmu_type type);
 
 #include <linux/err.h>
 
-static inline struct platform_device *
-reserve_pmu(enum arm_pmu_type type)
-{
-	return ERR_PTR(-ENODEV);
-}
-
 static inline int
-release_pmu(enum arm_pmu_type type)
+reserve_pmu(enum arm_pmu_type type)
 {
 	return -ENODEV;
 }
 
-static inline int
-init_pmu(enum arm_pmu_type type)
-{
-	return -ENODEV;
-}
+static inline void
+release_pmu(enum arm_pmu_type type) { }
 
 #endif /* CONFIG_CPU_HAS_PMU */
 
+#ifdef CONFIG_HW_PERF_EVENTS
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+	/*
+	 * The events that are active on the PMU for the given index.
+	 */
+	struct perf_event	**events;
+
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	unsigned long		*used_mask;
+
+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t		pmu_lock;
+};
+
+struct arm_pmu {
+	struct pmu	pmu;
+	enum arm_perf_pmu_ids id;
+	enum arm_pmu_type type;
+	cpumask_t	active_irqs;
+	const char	*name;
+	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
+	void		(*enable)(struct hw_perf_event *evt, int idx);
+	void		(*disable)(struct hw_perf_event *evt, int idx);
+	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
+					 struct hw_perf_event *hwc);
+	int		(*set_event_filter)(struct hw_perf_event *evt,
+					    struct perf_event_attr *attr);
+	u32		(*read_counter)(int idx);
+	void		(*write_counter)(int idx, u32 val);
+	void		(*start)(void);
+	void		(*stop)(void);
+	void		(*reset)(void *);
+	int		(*map_event)(struct perf_event *event);
+	int		num_events;
+	atomic_t	active_events;
+	struct mutex	reserve_mutex;
+	u64		max_period;
+	struct platform_device	*plat_device;
+	struct pmu_hw_events	*(*get_hw_events)(void);
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+
+u64 armpmu_event_update(struct perf_event *event,
+			struct hw_perf_event *hwc,
+			int idx, int overflow);
+
+int armpmu_event_set_period(struct perf_event *event,
+			    struct hw_perf_event *hwc,
+			    int idx);
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
 #endif /* __ARM_PMU_H__ */
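Note: the structural change in this header is that struct arm_pmu now embeds a struct pmu, so the perf core is handed the embedded object and driver callbacks recover the wrapper with to_arm_pmu(). A minimal sketch of a driver built on the new interface might look like this (all `my_*` names are illustrative, not part of this commit):

```c
/* Sketch only: a hypothetical PMU driver using the interface above. */
#include <asm/pmu.h>

static struct pmu_hw_events my_hw_events;	/* illustrative storage */

static struct pmu_hw_events *my_get_hw_events(void)
{
	return &my_hw_events;
}

static struct arm_pmu my_pmu = {
	.name		= "my-pmu",		/* hypothetical */
	.num_events	= 4,
	.max_period	= (1LLU << 32) - 1,
	.get_hw_events	= my_get_hw_events,
	/* .handle_irq/.enable/.disable/.get_event_idx/.map_event etc.
	 * must be filled in by a real driver. */
};

static int __init my_pmu_driver_init(void)
{
	/* armpmu_register() initialises the embedded struct pmu and
	 * registers it with the perf core under the given name/type. */
	return armpmu_register(&my_pmu, "my-pmu", PERF_TYPE_RAW);
}
```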
arch/arm/kernel/perf_event.c

@@ -12,6 +12,7 @@
  */
 #define pr_fmt(fmt) "hw perfevents: " fmt
 
+#include <linux/bitmap.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -26,16 +27,8 @@
 #include <asm/pmu.h>
 #include <asm/stacktrace.h>
 
-static struct platform_device *pmu_device;
-
-/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
-static DEFINE_RAW_SPINLOCK(pmu_lock);
-
 /*
- * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
+ * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
  * another platform that supports more, we need to increase this to be the
  * largest of all platforms.
  *
@@ -43,62 +36,24 @@ static DEFINE_RAW_SPINLOCK(pmu_lock);
  * cycle counter CCNT + 31 events counters CNT0..30.
  * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
  */
-#define ARMPMU_MAX_HWEVENTS		33
+#define ARMPMU_MAX_HWEVENTS		32
 
-/* The events for a given CPU. */
-struct cpu_hw_events {
-	/*
-	 * The events that are active on the CPU for the given index. Index 0
-	 * is reserved.
-	 */
-	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
-
-	/*
-	 * A 1 bit for an index indicates that the counter is being used for
-	 * an event. A 0 means that the counter can be used.
-	 */
-	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-
-	/*
-	 * A 1 bit for an index indicates that the counter is actively being
-	 * used.
-	 */
-	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-};
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
-
-struct arm_pmu {
-	enum arm_perf_pmu_ids id;
-	const char	*name;
-	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct hw_perf_event *evt, int idx);
-	void		(*disable)(struct hw_perf_event *evt, int idx);
-	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
-					 struct hw_perf_event *hwc);
-	u32		(*read_counter)(int idx);
-	void		(*write_counter)(int idx, u32 val);
-	void		(*start)(void);
-	void		(*stop)(void);
-	void		(*reset)(void *);
-	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
-				    [PERF_COUNT_HW_CACHE_OP_MAX]
-				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
-	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
-	u32		raw_event_mask;
-	int		num_events;
-	u64		max_period;
-};
+static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
 
 /* Set at runtime when we know what CPU type we are. */
-static const struct arm_pmu *armpmu;
+static struct arm_pmu *cpu_pmu;
 
 enum arm_perf_pmu_ids
 armpmu_get_pmu_id(void)
 {
 	int id = -ENODEV;
 
-	if (armpmu != NULL)
-		id = armpmu->id;
+	if (cpu_pmu != NULL)
+		id = cpu_pmu->id;
 
 	return id;
 }
@@ -109,8 +64,8 @@ armpmu_get_max_events(void)
 {
 	int max_events = 0;
 
-	if (armpmu != NULL)
-		max_events = armpmu->num_events;
+	if (cpu_pmu != NULL)
+		max_events = cpu_pmu->num_events;
 
 	return max_events;
 }
@@ -130,7 +85,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
 static int
-armpmu_map_cache_event(u64 config)
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
 {
 	unsigned int cache_type, cache_op, cache_result, ret;
 
@@ -146,7 +105,7 @@ armpmu_map_cache_event(u64 config)
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;
 
-	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
 
 	if (ret == CACHE_OP_UNSUPPORTED)
 		return -ENOENT;
@@ -155,23 +114,46 @@ armpmu_map_cache_event(u64 config)
 }
 
 static int
-armpmu_map_event(u64 config)
+armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*armpmu->event_map)[config];
-	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
+	int mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
 static int
-armpmu_map_raw_event(u64 config)
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
 {
-	return (int)(config & armpmu->raw_event_mask);
+	return (int)(config & raw_event_mask);
 }
 
-static int
+static int map_cpu_event(struct perf_event *event,
+			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+			 const unsigned (*cache_map)
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX],
+			 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
+}
+
+int
 armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0;
@@ -202,11 +184,12 @@ armpmu_event_set_period(struct perf_event *event,
 	return ret;
 }
 
-static u64
+u64
 armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx, int overflow)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
 
 again:
@@ -246,11 +229,9 @@ armpmu_read(struct perf_event *event)
 static void
 armpmu_stop(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (!armpmu)
-		return;
-
 	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
@@ -266,11 +247,9 @@ armpmu_stop(struct perf_event *event, int flags)
 static void
 armpmu_start(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (!armpmu)
-		return;
-
 	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
@@ -293,16 +272,16 @@ armpmu_start(struct perf_event *event, int flags)
 static void
 armpmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
 	WARN_ON(idx < 0);
 
-	clear_bit(idx, cpuc->active_mask);
 	armpmu_stop(event, PERF_EF_UPDATE);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);
 
 	perf_event_update_userpage(event);
 }
@@ -310,7 +289,8 @@ armpmu_del(struct perf_event *event, int flags)
 static int
 armpmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx;
 	int err = 0;
@@ -318,7 +298,7 @@ armpmu_add(struct perf_event *event, int flags)
 	perf_pmu_disable(event->pmu);
 
 	/* If we don't have a space for the counter then finish early. */
-	idx = armpmu->get_event_idx(cpuc, hwc);
+	idx = armpmu->get_event_idx(hw_events, hwc);
 	if (idx < 0) {
		err = idx;
		goto out;
@@ -330,8 +310,7 @@ armpmu_add(struct perf_event *event, int flags)
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
-	cpuc->events[idx] = event;
-	set_bit(idx, cpuc->active_mask);
+	hw_events->events[idx] = event;
 
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
@@ -345,25 +324,25 @@ armpmu_add(struct perf_event *event, int flags)
 	return err;
 }
 
-static struct pmu pmu;
-
 static int
-validate_event(struct cpu_hw_events *cpuc,
+validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event fake_event = event->hw;
+	struct pmu *leader_pmu = event->group_leader->pmu;
 
-	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;
 
-	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
+	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
 }
 
 static int
 validate_group(struct perf_event *event)
 {
 	struct perf_event *sibling, *leader = event->group_leader;
-	struct cpu_hw_events fake_pmu;
+	struct pmu_hw_events fake_pmu;
 
 	memset(&fake_pmu, 0, sizeof(fake_pmu));
 
@@ -383,110 +362,119 @@ validate_group(struct perf_event *event)
 
 static irqreturn_t armpmu_platform_irq(int irq, void *dev)
 {
-	struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);
+	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
+	struct platform_device *plat_device = armpmu->plat_device;
+	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
 
 	return plat->handle_irq(irq, dev, armpmu->handle_irq);
 }
 
+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = armpmu->plat_device;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	for (i = 0; i < irqs; ++i) {
+		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+			continue;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq >= 0)
+			free_irq(irq, armpmu);
+	}
+
+	release_pmu(armpmu->type);
+}
+
 static int
-armpmu_reserve_hardware(void)
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
 {
 	struct arm_pmu_platdata *plat;
 	irq_handler_t handle_irq;
-	int i, err = -ENODEV, irq;
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = armpmu->plat_device;
 
-	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
-	if (IS_ERR(pmu_device)) {
+	err = reserve_pmu(armpmu->type);
+	if (err) {
		pr_warning("unable to reserve pmu\n");
-		return PTR_ERR(pmu_device);
+		return err;
	}
 
-	init_pmu(ARM_PMU_DEVICE_CPU);
-
 	plat = dev_get_platdata(&pmu_device->dev);
 	if (plat && plat->handle_irq)
		handle_irq = armpmu_platform_irq;
	else
		handle_irq = armpmu->handle_irq;
 
-	if (pmu_device->num_resources < 1) {
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}
 
-	for (i = 0; i < pmu_device->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
+		err = 0;
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;
 
+		/*
+		 * If we have a single PMU interrupt that we can't shift,
+		 * assume that we're running on a uniprocessor machine and
+		 * continue. Otherwise, continue without this interrupt.
+		 */
+		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				    irq, i);
+			continue;
+		}
+
		err = request_irq(irq, handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
-				  "armpmu", NULL);
+				  "arm-pmu", armpmu);
		if (err) {
-			pr_warning("unable to request IRQ%d for ARM perf "
-				   "counters\n", irq);
-			break;
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
+			armpmu_release_hardware(armpmu);
+			return err;
		}
-	}
 
-	if (err) {
-		for (i = i - 1; i >= 0; --i) {
-			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
-				free_irq(irq, NULL);
-		}
-		release_pmu(ARM_PMU_DEVICE_CPU);
-		pmu_device = NULL;
+		cpumask_set_cpu(i, &armpmu->active_irqs);
	}
 
-	return err;
+	return 0;
 }
 
 static void
-armpmu_release_hardware(void)
-{
-	int i, irq;
-
-	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, NULL);
-	}
-	armpmu->stop();
-
-	release_pmu(ARM_PMU_DEVICE_CPU);
-	pmu_device = NULL;
-}
-
-static atomic_t active_events = ATOMIC_INIT(0);
-static DEFINE_MUTEX(pmu_reserve_mutex);
-
-static void
 hw_perf_event_destroy(struct perf_event *event)
 {
-	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
-		armpmu_release_hardware();
-		mutex_unlock(&pmu_reserve_mutex);
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	atomic_t *active_events = &armpmu->active_events;
+	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
+
+	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+		armpmu_release_hardware(armpmu);
+		mutex_unlock(pmu_reserve_mutex);
	}
 }
 
 static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
+{
+	return attr->exclude_idle || attr->exclude_user ||
+	       attr->exclude_kernel || attr->exclude_hv;
+}
+
+static int
 __hw_perf_event_init(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	int mapping, err;
 
-	/* Decode the generic type into an ARM event identifier. */
-	if (PERF_TYPE_HARDWARE == event->attr.type) {
-		mapping = armpmu_map_event(event->attr.config);
-	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
-		mapping = armpmu_map_cache_event(event->attr.config);
-	} else if (PERF_TYPE_RAW == event->attr.type) {
-		mapping = armpmu_map_raw_event(event->attr.config);
-	} else {
-		pr_debug("event type %x not supported\n", event->attr.type);
-		return -EOPNOTSUPP;
-	}
+	mapping = armpmu->map_event(event);
 
 	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
@@ -494,35 +482,32 @@ __hw_perf_event_init(struct perf_event *event)
		return mapping;
	}
 
+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet. For SMP systems, each core has it's own PMU so we can't do any
+	 * clever allocation or constraints checking at this point.
+	 */
+	hwc->idx		= -1;
+	hwc->config_base	= 0;
+	hwc->config		= 0;
+	hwc->event_base		= 0;
+
 	/*
	 * Check whether we need to exclude the counter from certain modes.
-	 * The ARM performance counters are on all of the time so if someone
-	 * has asked us for some excludes then we have to fail.
	 */
-	if (event->attr.exclude_kernel || event->attr.exclude_user ||
-	    event->attr.exclude_hv || event->attr.exclude_idle) {
+	if ((!armpmu->set_event_filter ||
+	     armpmu->set_event_filter(hwc, &event->attr)) &&
+	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}
 
 	/*
-	 * We don't assign an index until we actually place the event onto
-	 * hardware. Use -1 to signify that we haven't decided where to put it
-	 * yet. For SMP systems, each core has it's own PMU so we can't do any
-	 * clever allocation or constraints checking at this point.
-	 */
-	hwc->idx = -1;
-
-	/*
-	 * Store the event encoding into the config_base field. config and
-	 * event_base are unused as the only 2 things we need to know are
-	 * the event mapping and the counter to use. The counter to use is
-	 * also the indx and the config_base is the event type.
+	 * Store the event encoding into the config_base field.
	 */
-	hwc->config_base = (unsigned long)mapping;
-	hwc->config = 0;
-	hwc->event_base = 0;
+	hwc->config_base |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
		hwc->sample_period  = armpmu->max_period;
@@ -542,32 +527,23 @@ __hw_perf_event_init(struct perf_event *event)
 
 static int armpmu_event_init(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	int err = 0;
+	atomic_t *active_events = &armpmu->active_events;
 
-	switch (event->attr.type) {
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-		break;
-
-	default:
+	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;
-	}
-
-	if (!armpmu)
-		return -ENODEV;
 
 	event->destroy = hw_perf_event_destroy;
 
-	if (!atomic_inc_not_zero(&active_events)) {
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0) {
-			err = armpmu_reserve_hardware();
-		}
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&armpmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
+			err = armpmu_reserve_hardware(armpmu);
 
		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
+			atomic_inc(active_events);
+		mutex_unlock(&armpmu->reserve_mutex);
	}
 
 	if (err)
@@ -582,22 +558,9 @@ static int armpmu_event_init(struct perf_event *event)
 
 static void armpmu_enable(struct pmu *pmu)
 {
-	/* Enable all of the perf events on hardware. */
-	int idx, enabled = 0;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	if (!armpmu)
-		return;
-
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
-		struct perf_event *event = cpuc->events[idx];
-
-		if (!event)
-			continue;
-
-		armpmu->enable(&event->hw, idx);
-		enabled = 1;
-	}
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
 	if (enabled)
		armpmu->start();
@@ -605,20 +568,32 @@ static void armpmu_enable(struct pmu *pmu)
 
 static void armpmu_disable(struct pmu *pmu)
 {
-	if (armpmu)
-		armpmu->stop();
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	armpmu->stop();
 }
 
-static struct pmu pmu = {
-	.pmu_enable	= armpmu_enable,
-	.pmu_disable	= armpmu_disable,
-	.event_init	= armpmu_event_init,
-	.add		= armpmu_add,
-	.del		= armpmu_del,
-	.start		= armpmu_start,
-	.stop		= armpmu_stop,
-	.read		= armpmu_read,
-};
+static void __init armpmu_init(struct arm_pmu *armpmu)
+{
+	atomic_set(&armpmu->active_events, 0);
+	mutex_init(&armpmu->reserve_mutex);
+
+	armpmu->pmu = (struct pmu) {
+		.pmu_enable	= armpmu_enable,
+		.pmu_disable	= armpmu_disable,
+		.event_init	= armpmu_event_init,
+		.add		= armpmu_add,
+		.del		= armpmu_del,
+		.start		= armpmu_start,
+		.stop		= armpmu_stop,
+		.read		= armpmu_read,
+	};
+}
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+{
+	armpmu_init(armpmu);
+	return perf_pmu_register(&armpmu->pmu, name, type);
+}
 
 /* Include the PMU-specific implementations. */
 #include "perf_event_xscale.c"
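Note: with the global armpmu pointer gone, every perf-core callback above recovers its own PMU instance from the struct pmu it is handed. The pattern in isolation, under an illustrative function name (not part of this commit):

```c
/* Sketch: how callbacks recover the arm_pmu wrapper from the embedded
 * struct pmu handed out by the perf core (illustrative only). */
static void example_show_counters(struct pmu *pmu)	/* hypothetical */
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);	/* container_of() */

	pr_info("%s: %d counters, max period %llu\n",
		armpmu->name, armpmu->num_events, armpmu->max_period);
}
```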
@@ -630,14 +605,72 @@ static struct pmu pmu = {
  * This requires SMP to be available, so exists as a separate initcall.
  */
 static int __init
-armpmu_reset(void)
+cpu_pmu_reset(void)
 {
-	if (armpmu && armpmu->reset)
-		return on_each_cpu(armpmu->reset, NULL, 1);
+	if (cpu_pmu && cpu_pmu->reset)
+		return on_each_cpu(cpu_pmu->reset, NULL, 1);
 	return 0;
 }
-arch_initcall(armpmu_reset);
+arch_initcall(cpu_pmu_reset);
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static struct of_device_id armpmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a9-pmu"},
+	{.compatible = "arm,cortex-a8-pmu"},
+	{.compatible = "arm,arm1136-pmu"},
+	{.compatible = "arm,arm1176-pmu"},
+	{},
+};
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+	{.name = "arm-pmu"},
+	{},
+};
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
+{
+	cpu_pmu->plat_device = pdev;
+	return 0;
+}
+
+static struct platform_driver armpmu_driver = {
+	.driver		= {
+		.name	= "arm-pmu",
+		.of_match_table = armpmu_of_device_ids,
+	},
+	.probe		= armpmu_device_probe,
+	.id_table	= armpmu_plat_device_ids,
+};
+
+static int __init register_pmu_driver(void)
+{
+	return platform_driver_register(&armpmu_driver);
+}
+device_initcall(register_pmu_driver);
+
+static struct pmu_hw_events *armpmu_get_cpu_events(void)
+{
+	return &__get_cpu_var(cpu_hw_events);
+}
+
+static void __init cpu_pmu_init(struct arm_pmu *armpmu)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		events->events = per_cpu(hw_events, cpu);
+		events->used_mask = per_cpu(used_mask, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+	}
+	armpmu->get_hw_events = armpmu_get_cpu_events;
+	armpmu->type = ARM_PMU_DEVICE_CPU;
+}
+
+/*
+ * CPU PMU identification and registration.
+ */
 static int __init
 init_hw_perf_events(void)
 {
@@ -651,22 +684,22 @@ init_hw_perf_events(void)
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
-			armpmu = armv6pmu_init();
+			cpu_pmu = armv6pmu_init();
			break;
		case 0xB020:	/* ARM11mpcore */
-			armpmu = armv6mpcore_pmu_init();
+			cpu_pmu = armv6mpcore_pmu_init();
			break;
		case 0xC080:	/* Cortex-A8 */
-			armpmu = armv7_a8_pmu_init();
+			cpu_pmu = armv7_a8_pmu_init();
			break;
		case 0xC090:	/* Cortex-A9 */
-			armpmu = armv7_a9_pmu_init();
+			cpu_pmu = armv7_a9_pmu_init();
			break;
		case 0xC050:	/* Cortex-A5 */
-			armpmu = armv7_a5_pmu_init();
+			cpu_pmu = armv7_a5_pmu_init();
			break;
		case 0xC0F0:	/* Cortex-A15 */
-			armpmu = armv7_a15_pmu_init();
+			cpu_pmu = armv7_a15_pmu_init();
			break;
		}
	/* Intel CPUs [xscale]. */
@@ -674,23 +707,23 @@ init_hw_perf_events(void)
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
-			armpmu = xscale1pmu_init();
+			cpu_pmu = xscale1pmu_init();
			break;
		case 2:
-			armpmu = xscale2pmu_init();
+			cpu_pmu = xscale2pmu_init();
			break;
		}
	}
 
-	if (armpmu) {
+	if (cpu_pmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
+			cpu_pmu->name, cpu_pmu->num_events);
+		cpu_pmu_init(cpu_pmu);
+		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
	} else {
		pr_info("no hardware support available\n");
	}
 
-	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-
 	return 0;
 }
 early_initcall(init_hw_perf_events);
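Note: cpu_pmu_init() above wires the static per-CPU arrays into each CPU's pmu_hw_events, and armpmu_enable() now consults the used_mask bitmap via bitmap_weight() instead of walking the removed active_mask. The bookkeeping, distilled into a self-contained sketch (demo_* names are hypothetical):

```c
/* Sketch of the used_mask bookkeeping (illustrative, not kernel code). */
#include <linux/bitmap.h>

#define NR_COUNTERS 32

static DECLARE_BITMAP(demo_used_mask, NR_COUNTERS);	/* hypothetical */

static int demo_events_scheduled(void)
{
	/* get_event_idx() sets a bit when a counter is claimed;
	 * armpmu_del() clears it. A non-zero weight means the PMU
	 * has work scheduled, so armpmu_enable() calls armpmu->start(). */
	return bitmap_weight(demo_used_mask, NR_COUNTERS);
}
```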
arch/arm/kernel/perf_event_v6.c

@@ -54,7 +54,7 @@ enum armv6_perf_types {
 };
 
 enum armv6_counters {
-	ARMV6_CYCLE_COUNTER = 1,
+	ARMV6_CYCLE_COUNTER = 0,
 	ARMV6_COUNTER0,
 	ARMV6_COUNTER1,
 };
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
			  int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= 0;
@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int counter_is_active(unsigned long pmcr, int idx)
+{
+	unsigned long mask = 0;
+	if (idx == ARMV6_CYCLE_COUNTER)
+		mask = ARMV6_PMCR_CCOUNT_IEN;
+	else if (idx == ARMV6_COUNTER0)
+		mask = ARMV6_PMCR_COUNT0_IEN;
+	else if (idx == ARMV6_COUNTER1)
+		mask = ARMV6_PMCR_COUNT1_IEN;
+
+	if (mask)
+		return pmcr & mask;
+
+	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+	return 0;
 }
 
 static irqreturn_t
@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!counter_is_active(pmcr, idx))
			continue;
 
		/*
@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
			continue;
 
		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
	}
 
	/*
@@ -527,28 +545,30 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 armv6pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
		       struct hw_perf_event *event)
 {
 	/* Always place a cycle counter into the cycle counter. */
@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
			   int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
				  int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int armv6_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6_perf_map,
+				&armv6_perf_cache_map, 0xFF);
 }
 
-static const struct arm_pmu armv6pmu = {
+static struct arm_pmu armv6pmu = {
 	.id			= ARM_PERF_PMU_ID_V6,
 	.name			= "v6",
 	.handle_irq		= armv6pmu_handle_irq,
@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6_perf_cache_map,
-	.event_map		= &armv6_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };
 
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return &armv6pmu;
 }
@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void)
  * disable the interrupt reporting and update the event. When unthrottling we
  * reset the period and enable the interrupt reporting.
  */
-static const struct arm_pmu armv6mpcore_pmu = {
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6mpcore_perf_map,
+				&armv6mpcore_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6mpcore_pmu = {
 	.id			= ARM_PERF_PMU_ID_V6MP,
 	.name			= "v6mpcore",
 	.handle_irq		= armv6pmu_handle_irq,
@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6mpcore_perf_cache_map,
-	.event_map		= &armv6mpcore_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6mpcore_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };
 
-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return &armv6mpcore_pmu;
 }
 #else
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }
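Note: all of the v6 enable/disable/start/stop paths above share one shape: PMCR is a single read-modify-write register covering every counter, and the old global pmu_lock is now the lock in the PMU instance's own pmu_hw_events. The common pattern, distilled under a hypothetical helper name:

```c
/* Sketch: the serialised PMCR update pattern used throughout the v6 code. */
static void demo_pmcr_update(unsigned long mask, unsigned long bits)
{
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	unsigned long flags, val;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;		/* clear the field being updated */
	val |= bits;		/* install the new value */
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
```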
arch/arm/kernel/perf_event_v7.c

@@ -17,6 +17,9 @@
  */
 
 #ifdef CONFIG_CPU_V7
+
+static struct arm_pmu armv7pmu;
+
 /*
  * Common ARMv7 event types
  *
@@ -676,23 +679,24 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
- * Perf Events counters
+ * Perf Events' indices
  */
-enum armv7_counters {
-	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
-	ARMV7_COUNTER0		= 2,	/* First event counter */
-};
+#define	ARMV7_IDX_CYCLE_COUNTER	0
+#define	ARMV7_IDX_COUNTER0	1
+#define	ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+
+#define	ARMV7_MAX_COUNTERS	32
+#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 
 /*
- * The cycle counter is ARMV7_CYCLE_COUNTER.
- * The first event counter is ARMV7_COUNTER0.
- * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
+ * ARMv7 low level PMNC access
  */
-#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
 
 /*
- * ARMv7 low level PMNC access
+ * Perf Event to low level counters mapping
  */
+#define	ARMV7_IDX_TO_COUNTER(x)	\
+	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
 
 /*
  * Per-CPU PMNC: config reg
@@ -708,103 +712,76 @@ enum armv7_counters {
 #define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
 
 /*
- * Available counters
- */
-#define ARMV7_CNT0		0	/* First event counter */
-#define ARMV7_CCNT		31	/* Cycle counter */
-
-/* Perf Event to low level counters mapping */
-#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
-
-/*
- * CNTENS: counters enable reg
- */
-#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * CNTENC: counters disable reg
- */
-#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENC: counters overflow interrupt disable reg
- */
-#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * EVTSEL: Event selection reg
- */
-#define ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */
-
-/*
- * SELECT: Counter selection reg
- */
-#define ARMV7_SELECT_MASK	0x1f		/* Mask for writable bits */
-
-/*
  * FLAG: counters overflow flag status reg
  */
-#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
 #define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 #define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
-static inline unsigned long armv7_pmnc_read(void)
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv2
+ */
+#define	ARMV7_EXCLUDE_PL1	(1 << 31)
+#define	ARMV7_EXCLUDE_USER	(1 << 30)
+#define	ARMV7_INCLUDE_HYP	(1 << 27)
+
+static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
 	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 	return val;
 }
 
-static inline void armv7_pmnc_write(unsigned long val)
+static inline void armv7_pmnc_write(u32 val)
 {
 	val &= ARMV7_PMNC_MASK;
 	isb();
 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }
 
-static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 {
 	return pmnc & ARMV7_OVERFLOWED_MASK;
 }
 
-static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
-					enum armv7_counters counter)
+static inline int armv7_pmnc_counter_valid(int idx)
+{
+	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 {
 	int ret = 0;
+	u32 counter;
 
-	if (counter == ARMV7_CYCLE_COUNTER)
-		ret = pmnc & ARMV7_FLAG_C;
-	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
-		ret = pmnc & ARMV7_FLAG_P(counter);
-	else
+	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u checking wrong counter %d overflow status\n",
-			smp_processor_id(), counter);
+			smp_processor_id(), idx);
+	} else {
+		counter = ARMV7_IDX_TO_COUNTER(idx);
+		ret = pmnc & BIT(counter);
+	}
 
 	return ret;
 }
 
-static inline int armv7_pmnc_select_counter(unsigned int idx)
+static inline int armv7_pmnc_select_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
-		pr_err("CPU%u selecting wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
	}
 
-	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 	isb();
 
 	return idx;
@@ -812,124 +789,95 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
 
 static inline u32 armv7pmu_read_counter(int idx)
 {
-	unsigned long value = 0;
+	u32 value = 0;
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mrc p15, 0, %0, c9, c13, 2"
-				     : "=r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 
 	return value;
 }
 
 static inline void armv7pmu_write_counter(int idx, u32 value)
 {
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mcr p15, 0, %0, c9, c13, 2"
-				     : : "r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 }
 
-static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 {
 	if (armv7_pmnc_select_counter(idx) == idx) {
-		val &= ARMV7_EVTSEL_MASK;
+		val &= ARMV7_EVTYPE_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
 }
 
-static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+static inline int armv7_pmnc_enable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENS_C;
-	else
-		val = ARMV7_CNTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+static inline int armv7_pmnc_disable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENC_C;
-	else
-		val = ARMV7_CNTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+static inline int armv7_pmnc_enable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENS_C;
-	else
-		val = ARMV7_INTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+static inline int armv7_pmnc_disable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENC_C;
-	else
-		val = ARMV7_INTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 	return idx;
 }
@@ -973,14 +921,14 @@ static void armv7_pmnc_dump_regs(void)
 	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 	printk(KERN_INFO "CCNT  =0x%08x\n", val);
 
-	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
+	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
-			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
-			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
 }
 #endif
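Note: replacing the armv7_counters enum with the ARMV7_IDX_* macros gives a single translation point between perf-facing indices and hardware counter numbers: index 0 is the cycle counter, and event-counter index idx maps to hardware counter idx - 1. A worked example (demo name is hypothetical):

```c
/* Sketch: perf index -> hardware counter translation (illustrative). */
static inline u32 demo_hw_counter(int idx)
{
	/* ARMV7_IDX_COUNTER0 (1) -> counter 0, idx 2 -> counter 1, ...
	 * The cycle counter (idx 0) wraps to 31 under ARMV7_COUNTER_MASK,
	 * which is exactly the CCNT bit position in the enable/flag regs. */
	return ARMV7_IDX_TO_COUNTER(idx);
}

/* So enabling perf idx 3 writes BIT(2) to PMCNTENSET, which is what
 * armv7_pmnc_enable_counter() above does via the c9, c12, 1 access. */
```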
...@@ -988,12 +936,13 @@ static void armv7_pmnc_dump_regs(void) ...@@ -988,12 +936,13 @@ static void armv7_pmnc_dump_regs(void)
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{ {
unsigned long flags; unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
/* /*
* Enable counter and interrupt, and set the counter to count * Enable counter and interrupt, and set the counter to count
* the event that we're interested in. * the event that we're interested in.
*/ */
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* /*
* Disable counter * Disable counter
...@@ -1002,9 +951,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) ...@@ -1002,9 +951,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
/* /*
* Set event (if destined for PMNx counters) * Set event (if destined for PMNx counters)
* We don't need to set the event if it's a cycle count * We only need to set the event for the cycle counter if we
* have the ability to perform event filtering.
*/ */
if (idx != ARMV7_CYCLE_COUNTER) if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base); armv7_pmnc_write_evtsel(idx, hwc->config_base);
/* /*
...@@ -1017,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) ...@@ -1017,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
*/ */
armv7_pmnc_enable_counter(idx); armv7_pmnc_enable_counter(idx);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
} }
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{ {
unsigned long flags; unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
/* /*
* Disable counter and interrupt * Disable counter and interrupt
*/ */
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* /*
* Disable counter * Disable counter
...@@ -1039,14 +990,14 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) ...@@ -1039,14 +990,14 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
*/ */
armv7_pmnc_disable_intens(idx); armv7_pmnc_disable_intens(idx);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
} }
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{ {
unsigned long pmnc; u32 pmnc;
struct perf_sample_data data; struct perf_sample_data data;
struct cpu_hw_events *cpuc; struct pmu_hw_events *cpuc;
struct pt_regs *regs; struct pt_regs *regs;
int idx; int idx;
...@@ -1069,13 +1020,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) ...@@ -1069,13 +1020,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
perf_sample_data_init(&data, 0); perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events); cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) { for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx]; struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc; struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
/* /*
* We have a single interrupt for all counters. Check that * We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it. * each counter has overflowed before we process it.
...@@ -1090,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) ...@@ -1090,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
continue; continue;
if (perf_event_overflow(event, &data, regs)) if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx); cpu_pmu->disable(hwc, idx);
} }
/* /*
@@ -1108,61 +1056,114 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 static void armv7pmu_start(void)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
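For context, a minimal sketch of the get_hw_events() accessor these functions now call; only the call sites appear in this hunk, so the per-CPU shape below is assumed (it is consistent with the __get_cpu_var(cpu_hw_events) use in the interrupt handler above):

/* Sketch: assumed per-CPU backing for cpu_pmu->get_hw_events(). */
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
	return &__get_cpu_var(cpu_hw_events);
}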
-static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
 				  struct hw_perf_event *event)
 {
 	int idx;
+	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;

 	/* Always place a cycle counter into the cycle counter. */
-	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
-		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
+	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
+		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
-		return ARMV7_CYCLE_COUNTER;
-	} else {
-		/*
-		 * For anything other than a cycle counter, try and use
-		 * the events counters
-		 */
-		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
-			if (!test_and_set_bit(idx, cpuc->used_mask))
-				return idx;
-		}
+		return ARMV7_IDX_CYCLE_COUNTER;
+	}

-		/* The counters are all in use. */
-		return -EAGAIN;
+	/*
+	 * For anything other than a cycle counter, try and use
+	 * the events counters
+	 */
+	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
 	}
+
+	/* The counters are all in use. */
+	return -EAGAIN;
 }
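Worked example of the new masking (values illustrative): config_base now carries privilege-filter bits alongside the event number, so a filtered cycle-count event still lands on the dedicated cycle counter because the filter bits are stripped before the comparison:

/* Illustration only, using constants from this file. */
unsigned long config_base = ARMV7_EXCLUDE_PL1 | ARMV7_PERFCTR_CPU_CYCLES;
unsigned long evtype = config_base & ARMV7_EVTYPE_EVENT;

/* evtype == ARMV7_PERFCTR_CPU_CYCLES, so the check above matches. */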
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv7pmu_set_event_filter(struct hw_perf_event *event,
+				     struct perf_event_attr *attr)
+{
+	unsigned long config_base = 0;
+
+	if (attr->exclude_idle)
+		return -EPERM;
+	if (attr->exclude_user)
+		config_base |= ARMV7_EXCLUDE_USER;
+	if (attr->exclude_kernel)
+		config_base |= ARMV7_EXCLUDE_PL1;
+	if (!attr->exclude_hv)
+		config_base |= ARMV7_INCLUDE_HYP;
+
+	/*
+	 * Install the filter into config_base as this is used to
+	 * construct the event type.
+	 */
+	event->config_base = config_base;
+
+	return 0;
+}
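For illustration, an assumed caller-side view of the filter: counting user-space cycles only would be requested through the generic attr flags, which the function above folds into config_base:

struct perf_event_attr attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.exclude_kernel	= 1,	/* sets ARMV7_EXCLUDE_PL1 */
	.exclude_hv	= 1,	/* leaves ARMV7_INCLUDE_HYP clear */
	.exclude_idle	= 0,	/* must stay 0: the filter returns -EPERM */
};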
 static void armv7pmu_reset(void *info)
 {
-	u32 idx, nb_cnt = armpmu->num_events;
+	u32 idx, nb_cnt = cpu_pmu->num_events;

 	/* The counter and interrupt enable registers are unknown at reset. */
-	for (idx = 1; idx < nb_cnt; ++idx)
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
 		armv7pmu_disable_event(NULL, idx);

 	/* Initialize & Reset PMNC: C and P bits */
 	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
 }
+static int armv7_a8_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a8_perf_map,
+			     &armv7_a8_perf_cache_map, 0xFF);
+}
+
+static int armv7_a9_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a9_perf_map,
+			     &armv7_a9_perf_cache_map, 0xFF);
+}
+
+static int armv7_a5_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a5_perf_map,
+			     &armv7_a5_perf_cache_map, 0xFF);
+}
+
+static int armv7_a15_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a15_perf_map,
+			     &armv7_a15_perf_cache_map, 0xFF);
+}
 static struct arm_pmu armv7pmu = {
 	.handle_irq = armv7pmu_handle_irq,
 	.enable = armv7pmu_enable_event,

@@ -1173,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
 	.start = armv7pmu_start,
 	.stop = armv7pmu_stop,
 	.reset = armv7pmu_reset,
-	.raw_event_mask = 0xFF,
 	.max_period = (1LLU << 32) - 1,
 };

@@ -1188,62 +1188,59 @@ static u32 __init armv7_read_num_pmnc_events(void)
 	return nb_cnt + 1;
 }
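The hunk above ends inside armv7_read_num_pmnc_events(); for context, a hedged sketch of that helper, consistent with the ARMv7 architecture (the event-counter count is the N field, PMCR bits [15:11], and the +1 accounts for the dedicated cycle counter):

/* Sketch only; the real body is not shown in this diff. */
static u32 __init armv7_read_num_pmnc_events_sketch(void)
{
	u32 nb_cnt;

	/* Read PMCR (CP15 c9, c12, 0) and extract N, bits [15:11]. */
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (nb_cnt));
	nb_cnt = (nb_cnt >> 11) & 0x1f;

	/* Add the cycle counter. */
	return nb_cnt + 1;
}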
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name = "ARMv7 Cortex-A8";
-	armv7pmu.cache_map = &armv7_a8_perf_cache_map;
-	armv7pmu.event_map = &armv7_a8_perf_map;
+	armv7pmu.map_event = armv7_a8_map_event;
 	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }

-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name = "ARMv7 Cortex-A9";
-	armv7pmu.cache_map = &armv7_a9_perf_cache_map;
-	armv7pmu.event_map = &armv7_a9_perf_map;
+	armv7pmu.map_event = armv7_a9_map_event;
 	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }

-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name = "ARMv7 Cortex-A5";
-	armv7pmu.cache_map = &armv7_a5_perf_cache_map;
-	armv7pmu.event_map = &armv7_a5_perf_map;
+	armv7pmu.map_event = armv7_a5_map_event;
 	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }

-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name = "ARMv7 Cortex-A15";
-	armv7pmu.cache_map = &armv7_a15_perf_cache_map;
-	armv7pmu.event_map = &armv7_a15_perf_map;
+	armv7pmu.map_event = armv7_a15_map_event;
 	armv7pmu.num_events = armv7_read_num_pmnc_events();
+	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
 	return &armv7pmu;
 }
 #else
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	return NULL;
 }
...
@@ -40,7 +40,7 @@ enum xscale_perf_types {
 };

 enum xscale_counters {
-	XSCALE_CYCLE_COUNTER = 1,
+	XSCALE_CYCLE_COUNTER = 0,
 	XSCALE_COUNTER0,
 	XSCALE_COUNTER1,
 	XSCALE_COUNTER2,
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;

@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);

 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;

@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;

 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}

 	irq_work_run();
@@ -284,6 +281,7 @@ static void
 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:

@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 static void
 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:

@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			 struct hw_perf_event *event)
 {
 	if (XSCALE_PERFCTR_CCNT == event->config_base) {

@@ -368,24 +367,26 @@ static void
 xscale1pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static inline u32

@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
 	}
 }
+static int xscale_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &xscale_perf_map,
+			     &xscale_perf_cache_map, 0xFF);
+}
+
-static const struct arm_pmu xscale1pmu = {
+static struct arm_pmu xscale1pmu = {
 	.id = ARM_PERF_PMU_ID_XSCALE1,
 	.name = "xscale1",
 	.handle_irq = xscale1pmu_handle_irq,

@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
 	.get_event_idx = xscale1pmu_get_event_idx,
 	.start = xscale1pmu_start,
 	.stop = xscale1pmu_stop,
-	.cache_map = &xscale_perf_cache_map,
-	.event_map = &xscale_perf_map,
-	.raw_event_mask = 0xFF,
+	.map_event = xscale_map_event,
 	.num_events = 3,
 	.max_period = (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return &xscale1pmu;
 }
@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;

@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);

 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;

@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;

 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}

 	irq_work_run();
@@ -616,6 +618,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();

@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();

@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			 struct hw_perf_event *event)
 {
 	int idx = xscale1pmu_get_event_idx(cpuc, event);

@@ -718,24 +722,26 @@ static void
 xscale2pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 static inline u32

@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
 	}
 }
-static const struct arm_pmu xscale2pmu = {
+static struct arm_pmu xscale2pmu = {
 	.id = ARM_PERF_PMU_ID_XSCALE2,
 	.name = "xscale2",
 	.handle_irq = xscale2pmu_handle_irq,

@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
 	.get_event_idx = xscale2pmu_get_event_idx,
 	.start = xscale2pmu_start,
 	.stop = xscale2pmu_stop,
-	.cache_map = &xscale_perf_cache_map,
-	.event_map = &xscale_perf_map,
-	.raw_event_mask = 0xFF,
+	.map_event = xscale_map_event,
 	.num_events = 5,
 	.max_period = (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return &xscale2pmu;
 }
 #else
-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return NULL;
 }
...
@@ -10,192 +10,26 @@
 *
 */

-#define pr_fmt(fmt) "PMU: " fmt
-
-#include <linux/cpumask.h>
 #include <linux/err.h>
-#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>

 #include <asm/pmu.h>

-static volatile long pmu_lock;
-
-static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
-
-static int __devinit pmu_register(struct platform_device *pdev,
-				  enum arm_pmu_type type)
-{
-	if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
-		pr_warning("received registration request for unknown "
-			   "PMU device type %d\n", type);
-		return -EINVAL;
-	}
-
-	if (pmu_devices[type]) {
-		pr_warning("rejecting duplicate registration of PMU device "
-			   "type %d.", type);
-		return -ENOSPC;
-	}
-
-	pr_info("registered new PMU device of type %d\n", type);
-	pmu_devices[type] = pdev;
-	return 0;
-}
-
-#define OF_MATCH_PMU(_name, _type) {	\
-	.compatible = _name,		\
-	.data = (void *)_type,		\
-}
-
-#define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
-
-static struct of_device_id armpmu_of_device_ids[] = {
-	OF_MATCH_CPU("arm,cortex-a9-pmu"),
-	OF_MATCH_CPU("arm,cortex-a8-pmu"),
-	OF_MATCH_CPU("arm,arm1136-pmu"),
-	OF_MATCH_CPU("arm,arm1176-pmu"),
-	{},
-};
-
-#define PLAT_MATCH_PMU(_name, _type) {	\
-	.name = _name,			\
-	.driver_data = _type,		\
-}
-
-#define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
-
-static struct platform_device_id armpmu_plat_device_ids[] = {
-	PLAT_MATCH_CPU("arm-pmu"),
-	{},
-};
-
-enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
-{
-	const struct of_device_id *of_id;
-	const struct platform_device_id *pdev_id;
-
-	/* provided by of_device_id table */
-	if (pdev->dev.of_node) {
-		of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
-		BUG_ON(!of_id);
-		return (enum arm_pmu_type)of_id->data;
-	}
-
-	/* Provided by platform_device_id table */
-	pdev_id = platform_get_device_id(pdev);
-	BUG_ON(!pdev_id);
-	return pdev_id->driver_data;
-}
-
-static int __devinit armpmu_device_probe(struct platform_device *pdev)
-{
-	return pmu_register(pdev, armpmu_device_type(pdev));
-}
-
-static struct platform_driver armpmu_driver = {
-	.driver = {
-		.name = "arm-pmu",
-		.of_match_table = armpmu_of_device_ids,
-	},
-	.probe = armpmu_device_probe,
-	.id_table = armpmu_plat_device_ids,
-};
-
-static int __init register_pmu_driver(void)
-{
-	return platform_driver_register(&armpmu_driver);
-}
-device_initcall(register_pmu_driver);
+/*
+ * PMU locking to ensure mutual exclusion between different subsystems.
+ */
+static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];

-struct platform_device *
+int
 reserve_pmu(enum arm_pmu_type type)
 {
-	struct platform_device *pdev;
-
-	if (test_and_set_bit_lock(type, &pmu_lock)) {
-		pdev = ERR_PTR(-EBUSY);
-	} else if (pmu_devices[type] == NULL) {
-		clear_bit_unlock(type, &pmu_lock);
-		pdev = ERR_PTR(-ENODEV);
-	} else {
-		pdev = pmu_devices[type];
-	}
-
-	return pdev;
+	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
 }
 EXPORT_SYMBOL_GPL(reserve_pmu);

-int
+void
 release_pmu(enum arm_pmu_type type)
 {
-	if (WARN_ON(!pmu_devices[type]))
-		return -EINVAL;
-	clear_bit_unlock(type, &pmu_lock);
-	return 0;
+	clear_bit_unlock(type, pmu_lock);
 }
 EXPORT_SYMBOL_GPL(release_pmu);

-static int
-set_irq_affinity(int irq,
-		 unsigned int cpu)
-{
-#ifdef CONFIG_SMP
-	int err = irq_set_affinity(irq, cpumask_of(cpu));
-	if (err)
-		pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-			   irq, cpu);
-	return err;
-#else
-	return -EINVAL;
-#endif
-}
-
-static int
-init_cpu_pmu(void)
-{
-	int i, irqs, err = 0;
-	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
-
-	if (!pdev)
-		return -ENODEV;
-
-	irqs = pdev->num_resources;
-
-	/*
-	 * If we have a single PMU interrupt that we can't shift, assume that
-	 * we're running on a uniprocessor machine and continue.
-	 */
-	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
-		return 0;
-
-	for (i = 0; i < irqs; ++i) {
-		err = set_irq_affinity(platform_get_irq(pdev, i), i);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-int
-init_pmu(enum arm_pmu_type type)
-{
-	int err = 0;
-
-	switch (type) {
-	case ARM_PMU_DEVICE_CPU:
-		err = init_cpu_pmu();
-		break;
-	default:
-		pr_warning("attempt to initialise PMU of unknown "
-			   "type %d\n", type);
-		err = -EINVAL;
-	}
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(init_pmu);
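A hypothetical caller sketch (function name invented) showing the simplified contract after this change: reservation yields a plain error code rather than a platform_device cookie, and release cannot fail:

static int claim_cpu_counters(void)
{
	int err = reserve_pmu(ARM_PMU_DEVICE_CPU);

	if (err)
		return err;	/* -EBUSY: another subsystem holds the PMU */

	/* ... program and read the counters ... */

	release_pmu(ARM_PMU_DEVICE_CPU);
	return 0;
}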
@@ -5715,6 +5715,7 @@ struct pmu *perf_init_event(struct perf_event *event)
 	pmu = idr_find(&pmu_idr, event->attr.type);
 	rcu_read_unlock();
 	if (pmu) {
+		event->pmu = pmu;
 		ret = pmu->event_init(event);
 		if (ret)
 			pmu = ERR_PTR(ret);

@@ -5722,6 +5723,7 @@ struct pmu *perf_init_event(struct perf_event *event)
 	}

 	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		event->pmu = pmu;
 		ret = pmu->event_init(event);
 		if (!ret)
 			goto unlock;

@@ -5848,8 +5850,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		return ERR_PTR(err);
 	}

-	event->pmu = pmu;
-
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
 			jump_label_inc(&perf_sched_events);
...
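Assigning event->pmu before calling ->event_init() (rather than after allocation) lets an event_init implementation recover its containing PMU. A hedged sketch of the idiom this enables, with the helper names assumed (the embedded struct pmu member and the map_event callback match the arm_pmu changes above):

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

static int armpmu_event_init_sketch(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/* Valid because perf_init_event() now assigns event->pmu first. */
	if (armpmu->map_event(event) < 0)
		return -ENOENT;

	return 0;
}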