Commit 4fb0d2ea authored Aug 31, 2011 by Will Deacon
Merge branches 'hwbreak', 'perf/updates' and 'perf/system-pmus' into for-rmk
parents d1244336 7325eaec
Showing 7 changed files with 640 additions and 686 deletions
arch/arm/include/asm/pmu.h            +74  -19
arch/arm/kernel/perf_event.c          +254 -221
arch/arm/kernel/perf_event_v6.c       +59  -28
arch/arm/kernel/perf_event_v7.c       +196 -199
arch/arm/kernel/perf_event_xscale.c   +47  -43
arch/arm/kernel/pmu.c                 +8   -174
kernel/events/core.c                  +2   -2
arch/arm/include/asm/pmu.h

@@ -13,7 +13,12 @@
 #define __ARM_PMU_H__

 #include <linux/interrupt.h>
 #include <linux/perf_event.h>

+/*
+ * Types of PMUs that can be accessed directly and require mutual
+ * exclusion between profiling tools.
+ */
 enum arm_pmu_type {
 	ARM_PMU_DEVICE_CPU	= 0,
 	ARM_NUM_PMU_DEVICES,

@@ -37,21 +42,17 @@ struct arm_pmu_platdata {
  * reserve_pmu() - reserve the hardware performance counters
  *
  * Reserve the hardware performance counters in the system for exclusive use.
- * The platform_device for the system is returned on success, ERR_PTR()
- * encoded error on failure.
+ * Returns 0 on success or -EBUSY if the lock is already held.
  */
-extern struct platform_device *
+extern int
 reserve_pmu(enum arm_pmu_type type);

 /**
  * release_pmu() - Relinquish control of the performance counters
  *
  * Release the performance counters and allow someone else to use them.
- * Callers must have disabled the counters and released IRQs before calling
- * this. The platform_device returned from reserve_pmu() must be passed as
- * a cookie.
  */
-extern int
+extern void
 release_pmu(enum arm_pmu_type type);

 /**

@@ -68,24 +69,78 @@ init_pmu(enum arm_pmu_type type);
 #include <linux/err.h>

-static inline struct platform_device *
-reserve_pmu(enum arm_pmu_type type)
-{
-	return ERR_PTR(-ENODEV);
-}
-
 static inline int
-release_pmu(enum arm_pmu_type type)
+reserve_pmu(enum arm_pmu_type type)
 {
 	return -ENODEV;
 }

-static inline int
-init_pmu(enum arm_pmu_type type)
-{
-	return -ENODEV;
-}
+static inline void
+release_pmu(enum arm_pmu_type type)
+{
+}

 #endif /* CONFIG_CPU_HAS_PMU */

+#ifdef CONFIG_HW_PERF_EVENTS
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+	/*
+	 * The events that are active on the PMU for the given index.
+	 */
+	struct perf_event	**events;
+
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	unsigned long		*used_mask;
+
+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t		pmu_lock;
+};
+
+struct arm_pmu {
+	struct pmu	pmu;
+	enum arm_perf_pmu_ids id;
+	enum arm_pmu_type type;
+	cpumask_t	active_irqs;
+	const char	*name;
+	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
+	void		(*enable)(struct hw_perf_event *evt, int idx);
+	void		(*disable)(struct hw_perf_event *evt, int idx);
+	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
+					 struct hw_perf_event *hwc);
+	int		(*set_event_filter)(struct hw_perf_event *evt,
+					    struct perf_event_attr *attr);
+	u32		(*read_counter)(int idx);
+	void		(*write_counter)(int idx, u32 val);
+	void		(*start)(void);
+	void		(*stop)(void);
+	void		(*reset)(void *);
+	int		(*map_event)(struct perf_event *event);
+	int		num_events;
+	atomic_t	active_events;
+	struct mutex	reserve_mutex;
+	u64		max_period;
+	struct platform_device	*plat_device;
+	struct pmu_hw_events	*(*get_hw_events)(void);
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+
+u64 armpmu_event_update(struct perf_event *event,
+			struct hw_perf_event *hwc,
+			int idx, int overflow);
+
+int armpmu_event_set_period(struct perf_event *event,
+			    struct hw_perf_event *hwc,
+			    int idx);
+
+#endif /* CONFIG_HW_PERF_EVENTS */

 #endif /* __ARM_PMU_H__ */
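The kernel-doc above describes the reworked reservation API: reserve_pmu() now returns 0 or a negative error (such as -EBUSY) instead of a platform_device pointer, and release_pmu() returns nothing. A minimal sketch of how a caller might drive that API, assuming only the header as modified above (the function name and error handling are illustrative, not part of this commit):

	/* Hypothetical caller, assuming <asm/pmu.h> as modified above. */
	static int my_claim_cpu_pmu(void)
	{
		int err;

		err = reserve_pmu(ARM_PMU_DEVICE_CPU);	/* 0 on success, -EBUSY if held */
		if (err)
			return err;

		/* ... request IRQs and program the counters here ... */

		release_pmu(ARM_PMU_DEVICE_CPU);	/* void: just drops the reservation */
		return 0;
	}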
arch/arm/kernel/perf_event.c

@@ -12,6 +12,7 @@
 #define pr_fmt(fmt) "hw perfevents: " fmt

 #include <linux/bitmap.h>
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>

@@ -26,16 +27,8 @@
 #include <asm/pmu.h>
 #include <asm/stacktrace.h>

-static struct platform_device *pmu_device;
-
-/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
-static DEFINE_RAW_SPINLOCK(pmu_lock);
-
 /*
- * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
+ * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
  * another platform that supports more, we need to increase this to be the
  * largest of all platforms.
  *

@@ -43,62 +36,24 @@ static DEFINE_RAW_SPINLOCK(pmu_lock);
  * cycle counter CCNT + 31 events counters CNT0..30.
  * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
  */
-#define ARMPMU_MAX_HWEVENTS		33
+#define ARMPMU_MAX_HWEVENTS		32

-/* The events for a given CPU. */
-struct cpu_hw_events {
-	/*
-	 * The events that are active on the CPU for the given index. Index 0
-	 * is reserved.
-	 */
-	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
-	/*
-	 * A 1 bit for an index indicates that the counter is being used for
-	 * an event. A 0 means that the counter can be used.
-	 */
-	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-	/*
-	 * A 1 bit for an index indicates that the counter is actively being
-	 * used.
-	 */
-	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-};
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
-
-struct arm_pmu {
-	enum arm_perf_pmu_ids id;
-	const char	*name;
-	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct hw_perf_event *evt, int idx);
-	void		(*disable)(struct hw_perf_event *evt, int idx);
-	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
-					 struct hw_perf_event *hwc);
-	u32		(*read_counter)(int idx);
-	void		(*write_counter)(int idx, u32 val);
-	void		(*start)(void);
-	void		(*stop)(void);
-	void		(*reset)(void *);
-	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
-				    [PERF_COUNT_HW_CACHE_OP_MAX]
-				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
-	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
-	u32		raw_event_mask;
-	int		num_events;
-	u64		max_period;
-};
+static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

 /* Set at runtime when we know what CPU type we are. */
-static const struct arm_pmu *armpmu;
+static struct arm_pmu *cpu_pmu;

 enum arm_perf_pmu_ids
 armpmu_get_pmu_id(void)
 {
 	int id = -ENODEV;

-	if (armpmu != NULL)
-		id = armpmu->id;
+	if (cpu_pmu != NULL)
+		id = cpu_pmu->id;

 	return id;
 }

@@ -109,8 +64,8 @@ armpmu_get_max_events(void)
 {
 	int max_events = 0;

-	if (armpmu != NULL)
-		max_events = armpmu->num_events;
+	if (cpu_pmu != NULL)
+		max_events = cpu_pmu->num_events;

 	return max_events;
 }

@@ -130,7 +85,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 #define CACHE_OP_UNSUPPORTED		0xFFFF

 static int
-armpmu_map_cache_event(u64 config)
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
 {
 	unsigned int cache_type, cache_op, cache_result, ret;

@@ -146,7 +105,7 @@ armpmu_map_cache_event(u64 config)
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;

-	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

 	if (ret == CACHE_OP_UNSUPPORTED)
 		return -ENOENT;

@@ -155,23 +114,46 @@ armpmu_map_cache_event(u64 config)
 }

 static int
-armpmu_map_event(u64 config)
+armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*armpmu->event_map)[config];
-	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
+	int mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }

 static int
-armpmu_map_raw_event(u64 config)
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
 {
-	return (int)(config & armpmu->raw_event_mask);
+	return (int)(config & raw_event_mask);
 }

-static int
+static int map_cpu_event(struct perf_event *event,
+			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+			 const unsigned (*cache_map)
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX],
+			 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
+}
+
+int
 armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0;

@@ -202,11 +184,12 @@ armpmu_event_set_period(struct perf_event *event,
 	return ret;
 }

-static u64
+u64
 armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx, int overflow)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;

 again:

@@ -246,11 +229,9 @@ armpmu_read(struct perf_event *event)
 static void
 armpmu_stop(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;

-	if (!armpmu)
-		return;
-
 	/*
 	 * ARM pmu always has to update the counter, so ignore
 	 * PERF_EF_UPDATE, see comments in armpmu_start().

@@ -266,11 +247,9 @@ armpmu_stop(struct perf_event *event, int flags)
 static void
 armpmu_start(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;

-	if (!armpmu)
-		return;
-
 	/*
 	 * ARM pmu always has to reprogram the period, so ignore
 	 * PERF_EF_RELOAD, see the comment below.

@@ -293,16 +272,16 @@ armpmu_start(struct perf_event *event, int flags)
 static void
 armpmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;

 	WARN_ON(idx < 0);

-	clear_bit(idx, cpuc->active_mask);
 	armpmu_stop(event, PERF_EF_UPDATE);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);

 	perf_event_update_userpage(event);
 }

@@ -310,7 +289,8 @@ armpmu_del(struct perf_event *event, int flags)
 static int
 armpmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx;
 	int err = 0;

@@ -318,7 +298,7 @@ armpmu_add(struct perf_event *event, int flags)
 	perf_pmu_disable(event->pmu);

 	/* If we don't have a space for the counter then finish early. */
-	idx = armpmu->get_event_idx(cpuc, hwc);
+	idx = armpmu->get_event_idx(hw_events, hwc);
 	if (idx < 0) {
 		err = idx;
 		goto out;

@@ -330,8 +310,7 @@ armpmu_add(struct perf_event *event, int flags)
 	 */
 	event->hw.idx = idx;
 	armpmu->disable(hwc, idx);
-	cpuc->events[idx] = event;
-	set_bit(idx, cpuc->active_mask);
+	hw_events->events[idx] = event;

 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	if (flags & PERF_EF_START)

@@ -345,25 +324,25 @@ armpmu_add(struct perf_event *event, int flags)
 	return err;
 }

-static struct pmu pmu;
-
 static int
-validate_event(struct cpu_hw_events *cpuc,
+validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event fake_event = event->hw;
+	struct pmu *leader_pmu = event->group_leader->pmu;

-	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
 		return 1;

-	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
+	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
 }

 static int
 validate_group(struct perf_event *event)
 {
 	struct perf_event *sibling, *leader = event->group_leader;
-	struct cpu_hw_events fake_pmu;
+	struct pmu_hw_events fake_pmu;

 	memset(&fake_pmu, 0, sizeof(fake_pmu));

@@ -383,110 +362,119 @@ validate_group(struct perf_event *event)
 static irqreturn_t armpmu_platform_irq(int irq, void *dev)
 {
-	struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);
+	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
+	struct platform_device *plat_device = armpmu->plat_device;
+	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

 	return plat->handle_irq(irq, dev, armpmu->handle_irq);
 }

+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = armpmu->plat_device;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	for (i = 0; i < irqs; ++i) {
+		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+			continue;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq >= 0)
+			free_irq(irq, armpmu);
+	}
+
+	release_pmu(armpmu->type);
+}
+
 static int
-armpmu_reserve_hardware(void)
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
 {
 	struct arm_pmu_platdata *plat;
 	irq_handler_t handle_irq;
-	int i, err = -ENODEV, irq;
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = armpmu->plat_device;

-	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
-	if (IS_ERR(pmu_device)) {
+	err = reserve_pmu(armpmu->type);
+	if (err) {
 		pr_warning("unable to reserve pmu\n");
-		return PTR_ERR(pmu_device);
+		return err;
 	}

-	init_pmu(ARM_PMU_DEVICE_CPU);
-
 	plat = dev_get_platdata(&pmu_device->dev);
 	if (plat && plat->handle_irq)
 		handle_irq = armpmu_platform_irq;
 	else
 		handle_irq = armpmu->handle_irq;

-	if (pmu_device->num_resources < 1) {
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
 		pr_err("no irqs for PMUs defined\n");
 		return -ENODEV;
 	}

-	for (i = 0; i < pmu_device->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
+		err = 0;
 		irq = platform_get_irq(pmu_device, i);
 		if (irq < 0)
 			continue;

+		/*
+		 * If we have a single PMU interrupt that we can't shift,
+		 * assume that we're running on a uniprocessor machine and
+		 * continue. Otherwise, continue without this interrupt.
+		 */
+		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				    irq, i);
+			continue;
+		}
+
 		err = request_irq(irq, handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
-				  "armpmu", NULL);
+				  "arm-pmu", armpmu);
 		if (err) {
-			pr_warning("unable to request IRQ%d for ARM perf "
-				"counters\n", irq);
-			break;
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				irq);
+			armpmu_release_hardware(armpmu);
+			return err;
 		}
-	}

-	if (err) {
-		for (i = i - 1; i >= 0; --i) {
-			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
-				free_irq(irq, NULL);
-		}
-		release_pmu(ARM_PMU_DEVICE_CPU);
-		pmu_device = NULL;
+		cpumask_set_cpu(i, &armpmu->active_irqs);
 	}

-	return err;
+	return 0;
 }

 static void
-armpmu_release_hardware(void)
+hw_perf_event_destroy(struct perf_event *event)
 {
-	int i, irq;
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	atomic_t *active_events	 = &armpmu->active_events;
+	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

-	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, NULL);
+	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+		armpmu_release_hardware(armpmu);
+		mutex_unlock(pmu_reserve_mutex);
 	}
-	armpmu->stop();
-
-	release_pmu(ARM_PMU_DEVICE_CPU);
-	pmu_device = NULL;
 }

-static atomic_t active_events = ATOMIC_INIT(0);
-static DEFINE_MUTEX(pmu_reserve_mutex);
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
 {
-	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
-		armpmu_release_hardware();
-		mutex_unlock(&pmu_reserve_mutex);
-	}
+	return attr->exclude_idle || attr->exclude_user ||
+	       attr->exclude_kernel || attr->exclude_hv;
 }

 static int
 __hw_perf_event_init(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	int mapping, err;

-	/* Decode the generic type into an ARM event identifier. */
-	if (PERF_TYPE_HARDWARE == event->attr.type) {
-		mapping = armpmu_map_event(event->attr.config);
-	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
-		mapping = armpmu_map_cache_event(event->attr.config);
-	} else if (PERF_TYPE_RAW == event->attr.type) {
-		mapping = armpmu_map_raw_event(event->attr.config);
-	} else {
-		pr_debug("event type %x not supported\n", event->attr.type);
-		return -EOPNOTSUPP;
-	}
+	mapping = armpmu->map_event(event);

 	if (mapping < 0) {
 		pr_debug("event %x:%llx not supported\n", event->attr.type,

@@ -494,35 +482,32 @@ __hw_perf_event_init(struct perf_event *event)
 		return mapping;
 	}

+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet. For SMP systems, each core has it's own PMU so we can't do any
+	 * clever allocation or constraints checking at this point.
+	 */
+	hwc->idx		= -1;
+	hwc->config_base	= 0;
+	hwc->config		= 0;
+	hwc->event_base		= 0;
+
 	/*
 	 * Check whether we need to exclude the counter from certain modes.
-	 * The ARM performance counters are on all of the time so if someone
-	 * has asked us for some excludes then we have to fail.
 	 */
-	if (event->attr.exclude_kernel || event->attr.exclude_user ||
-	    event->attr.exclude_hv || event->attr.exclude_idle) {
+	if ((!armpmu->set_event_filter ||
+	     armpmu->set_event_filter(hwc, &event->attr)) &&
+	     event_requires_mode_exclusion(&event->attr)) {
 		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
 		return -EPERM;
 	}

 	/*
-	 * We don't assign an index until we actually place the event onto
-	 * hardware. Use -1 to signify that we haven't decided where to put it
-	 * yet. For SMP systems, each core has it's own PMU so we can't do any
-	 * clever allocation or constraints checking at this point.
-	 */
-	hwc->idx = -1;
-
-	/*
-	 * Store the event encoding into the config_base field. config and
-	 * event_base are unused as the only 2 things we need to know are
-	 * the event mapping and the counter to use. The counter to use is
-	 * also the indx and the config_base is the event type.
+	 * Store the event encoding into the config_base field.
 	 */
-	hwc->config_base = (unsigned long)mapping;
-	hwc->config = 0;
-	hwc->event_base = 0;
+	hwc->config_base |= (unsigned long)mapping;

 	if (!hwc->sample_period) {
 		hwc->sample_period  = armpmu->max_period;

@@ -542,32 +527,23 @@ __hw_perf_event_init(struct perf_event *event)
 static int armpmu_event_init(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	int err = 0;
+	atomic_t *active_events = &armpmu->active_events;

-	switch (event->attr.type) {
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-		break;
-	default:
+	if (armpmu->map_event(event) == -ENOENT)
 		return -ENOENT;
-	}
-
-	if (!armpmu)
-		return -ENODEV;

 	event->destroy = hw_perf_event_destroy;

-	if (!atomic_inc_not_zero(&active_events)) {
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0) {
-			err = armpmu_reserve_hardware();
-		}
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&armpmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
+			err = armpmu_reserve_hardware(armpmu);

 		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
+			atomic_inc(active_events);
+		mutex_unlock(&armpmu->reserve_mutex);
 	}

 	if (err)

@@ -582,22 +558,9 @@ static int armpmu_event_init(struct perf_event *event)
 static void armpmu_enable(struct pmu *pmu)
 {
-	/* Enable all of the perf events on hardware. */
-	int idx, enabled = 0;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	if (!armpmu)
-		return;
-
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
-		struct perf_event *event = cpuc->events[idx];
-
-		if (!event)
-			continue;
-
-		armpmu->enable(&event->hw, idx);
-		enabled = 1;
-	}
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

 	if (enabled)
 		armpmu->start();

@@ -605,20 +568,32 @@ static void armpmu_enable(struct pmu *pmu)
 static void armpmu_disable(struct pmu *pmu)
 {
-	if (armpmu)
-		armpmu->stop();
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	armpmu->stop();
 }

-static struct pmu pmu = {
-	.pmu_enable	= armpmu_enable,
-	.pmu_disable	= armpmu_disable,
-	.event_init	= armpmu_event_init,
-	.add		= armpmu_add,
-	.del		= armpmu_del,
-	.start		= armpmu_start,
-	.stop		= armpmu_stop,
-	.read		= armpmu_read,
-};
+static void __init armpmu_init(struct arm_pmu *armpmu)
+{
+	atomic_set(&armpmu->active_events, 0);
+	mutex_init(&armpmu->reserve_mutex);
+
+	armpmu->pmu = (struct pmu) {
+		.pmu_enable	= armpmu_enable,
+		.pmu_disable	= armpmu_disable,
+		.event_init	= armpmu_event_init,
+		.add		= armpmu_add,
+		.del		= armpmu_del,
+		.start		= armpmu_start,
+		.stop		= armpmu_stop,
+		.read		= armpmu_read,
+	};
+}
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+{
+	armpmu_init(armpmu);
+	return perf_pmu_register(&armpmu->pmu, name, type);
+}

 /* Include the PMU-specific implementations. */
 #include "perf_event_xscale.c"

@@ -630,14 +605,72 @@ static struct pmu pmu = {
  * This requires SMP to be available, so exists as a separate initcall.
  */
 static int __init
-armpmu_reset(void)
+cpu_pmu_reset(void)
 {
-	if (armpmu && armpmu->reset)
-		return on_each_cpu(armpmu->reset, NULL, 1);
+	if (cpu_pmu && cpu_pmu->reset)
+		return on_each_cpu(cpu_pmu->reset, NULL, 1);
 	return 0;
 }
-arch_initcall(armpmu_reset);
+arch_initcall(cpu_pmu_reset);
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static struct of_device_id armpmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a9-pmu"},
+	{.compatible = "arm,cortex-a8-pmu"},
+	{.compatible = "arm,arm1136-pmu"},
+	{.compatible = "arm,arm1176-pmu"},
+	{},
+};
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+	{.name = "arm-pmu"},
+	{},
+};
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
+{
+	cpu_pmu->plat_device = pdev;
+	return 0;
+}
+
+static struct platform_driver armpmu_driver = {
+	.driver		= {
+		.name	= "arm-pmu",
+		.of_match_table = armpmu_of_device_ids,
+	},
+	.probe		= armpmu_device_probe,
+	.id_table	= armpmu_plat_device_ids,
+};
+
+static int __init register_pmu_driver(void)
+{
+	return platform_driver_register(&armpmu_driver);
+}
+device_initcall(register_pmu_driver);
+
+static struct pmu_hw_events *armpmu_get_cpu_events(void)
+{
+	return &__get_cpu_var(cpu_hw_events);
+}
+
+static void __init cpu_pmu_init(struct arm_pmu *armpmu)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		events->events = per_cpu(hw_events, cpu);
+		events->used_mask = per_cpu(used_mask, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+	}
+	armpmu->get_hw_events = armpmu_get_cpu_events;
+	armpmu->type = ARM_PMU_DEVICE_CPU;
+}

 /*
  * CPU PMU identification and registration.
  */
 static int __init
 init_hw_perf_events(void)
 {

@@ -651,22 +684,22 @@ init_hw_perf_events(void)
 		case 0xB360:	/* ARM1136 */
 		case 0xB560:	/* ARM1156 */
 		case 0xB760:	/* ARM1176 */
-			armpmu = armv6pmu_init();
+			cpu_pmu = armv6pmu_init();
 			break;
 		case 0xB020:	/* ARM11mpcore */
-			armpmu = armv6mpcore_pmu_init();
+			cpu_pmu = armv6mpcore_pmu_init();
 			break;
 		case 0xC080:	/* Cortex-A8 */
-			armpmu = armv7_a8_pmu_init();
+			cpu_pmu = armv7_a8_pmu_init();
 			break;
 		case 0xC090:	/* Cortex-A9 */
-			armpmu = armv7_a9_pmu_init();
+			cpu_pmu = armv7_a9_pmu_init();
 			break;
 		case 0xC050:	/* Cortex-A5 */
-			armpmu = armv7_a5_pmu_init();
+			cpu_pmu = armv7_a5_pmu_init();
 			break;
 		case 0xC0F0:	/* Cortex-A15 */
-			armpmu = armv7_a15_pmu_init();
+			cpu_pmu = armv7_a15_pmu_init();
 			break;
 		}
 	/* Intel CPUs [xscale]. */

@@ -674,23 +707,23 @@ init_hw_perf_events(void)
 		part_number = (cpuid >> 13) & 0x7;
 		switch (part_number) {
 		case 1:
-			armpmu = xscale1pmu_init();
+			cpu_pmu = xscale1pmu_init();
 			break;
 		case 2:
-			armpmu = xscale2pmu_init();
+			cpu_pmu = xscale2pmu_init();
 			break;
 		}
 	}

-	if (armpmu) {
+	if (cpu_pmu) {
 		pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
+			cpu_pmu->name, cpu_pmu->num_events);
+		cpu_pmu_init(cpu_pmu);
+		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
 	}

-	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-
 	return 0;
 }
 early_initcall(init_hw_perf_events);
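After this rework, the core expects each PMU backend to supply a map_event() callback and to be registered through armpmu_register(), rather than reading global armpmu state. A skeleton of what a backend looks like under the new interface (the my_* names and the my_perf_map/my_perf_cache_map tables are placeholders for illustration only, not from this commit; the real backends follow in the next three files):

	/* Hypothetical backend, assuming struct arm_pmu and the helpers above. */
	static int my_map_event(struct perf_event *event)
	{
		/* Dispatch on event->attr.type via the shared helper. */
		return map_cpu_event(event, &my_perf_map, &my_perf_cache_map, 0xFF);
	}

	static struct arm_pmu my_pmu = {
		.name		= "my-pmu",
		.handle_irq	= my_pmu_handle_irq,
		.enable		= my_pmu_enable_event,
		.disable	= my_pmu_disable_event,
		.get_event_idx	= my_pmu_get_event_idx,
		.map_event	= my_map_event,
		.num_events	= 3,
		.max_period	= (1LLU << 32) - 1,
	};

	/* Registration then mirrors what init_hw_perf_events() does for the CPU PMU: */
	/*	cpu_pmu_init(&my_pmu);					*/
	/*	armpmu_register(&my_pmu, "cpu", PERF_TYPE_RAW);		*/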
arch/arm/kernel/perf_event_v6.c

@@ -54,7 +54,7 @@ enum armv6_perf_types {
 };

 enum armv6_counters {
-	ARMV6_CYCLE_COUNTER = 1,
+	ARMV6_CYCLE_COUNTER = 0,
 	ARMV6_COUNTER0,
 	ARMV6_COUNTER1,
 };

@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = 0;

@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

+static int counter_is_active(unsigned long pmcr, int idx)
+{
+	unsigned long mask = 0;
+	if (idx == ARMV6_CYCLE_COUNTER)
+		mask = ARMV6_PMCR_CCOUNT_IEN;
+	else if (idx == ARMV6_COUNTER0)
+		mask = ARMV6_PMCR_COUNT0_IEN;
+	else if (idx == ARMV6_COUNTER1)
+		mask = ARMV6_PMCR_COUNT1_IEN;
+
+	if (mask)
+		return pmcr & mask;
+
+	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+	return 0;
+}
+
 static irqreturn_t

@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;

@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
 	perf_sample_data_init(&data, 0);

 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
+		if (!counter_is_active(pmcr, idx))
 			continue;

 		/*

@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;

 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}

 	/*

@@ -527,28 +545,30 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 armv6pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
		       struct hw_perf_event *event)
 {
 	/* Always place a cycle counter into the cycle counter. */

@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = ARMV6_PMCR_CCOUNT_IEN;

@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = ARMV6_PMCR_CCOUNT_IEN;

@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

+static int armv6_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6_perf_map,
+			     &armv6_perf_cache_map, 0xFF);
+}
+
-static const struct arm_pmu armv6pmu = {
+static struct arm_pmu armv6pmu = {
 	.id			= ARM_PERF_PMU_ID_V6,
 	.name			= "v6",
 	.handle_irq		= armv6pmu_handle_irq,

@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6_perf_cache_map,
-	.event_map		= &armv6_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return &armv6pmu;
 }

@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void)
  * disable the interrupt reporting and update the event. When unthrottling we
  * reset the period and enable the interrupt reporting.
  */
-static const struct arm_pmu armv6mpcore_pmu = {
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6mpcore_perf_map,
+			     &armv6mpcore_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6mpcore_pmu = {
 	.id			= ARM_PERF_PMU_ID_V6MP,
 	.name			= "v6mpcore",
 	.handle_irq		= armv6pmu_handle_irq,

@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6mpcore_perf_cache_map,
-	.event_map		= &armv6mpcore_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6mpcore_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return &armv6mpcore_pmu;
 }
 #else
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }
arch/arm/kernel/perf_event_v7.c

@@ -17,6 +17,9 @@
 */
 #ifdef CONFIG_CPU_V7

+static struct arm_pmu armv7pmu;
+
 /*
 * Common ARMv7 event types
 *

@@ -676,23 +679,24 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };

 /*
- * Perf Events counters
+ * Perf Events' indices
 */
-enum armv7_counters {
-	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
-	ARMV7_COUNTER0		= 2,	/* First event counter */
-};
+#define	ARMV7_IDX_CYCLE_COUNTER	0
+#define	ARMV7_IDX_COUNTER0	1
+#define	ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+
+#define	ARMV7_MAX_COUNTERS	32
+#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

 /*
- * The cycle counter is ARMV7_CYCLE_COUNTER.
- * The first event counter is ARMV7_COUNTER0.
- * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
+ * ARMv7 low level PMNC access
 */
-#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)

 /*
- * ARMv7 low level PMNC access
+ * Perf Event to low level counters mapping
 */
+#define	ARMV7_IDX_TO_COUNTER(x)	\
+	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

 /*
 * Per-CPU PMNC: config reg

@@ -708,103 +712,76 @@ enum armv7_counters {
 #define ARMV7_PMNC_MASK		0x3f	/* Mask for writable bits */

 /*
- * Available counters
- */
-#define ARMV7_CNT0		0	/* First event counter */
-#define ARMV7_CCNT		31	/* Cycle counter */
-
-/* Perf Event to low level counters mapping */
-#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
-
-/*
- * CNTENS: counters enable reg
- */
-#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * CNTENC: counters disable reg
- */
-#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENC: counters overflow interrupt disable reg
- */
-#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * EVTSEL: Event selection reg
+ * FLAG: counters overflow flag status reg
 */
-#define ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */
+#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
+#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

 /*
- * SELECT: Counter selection reg
+ * PMXEVTYPER: Event selection reg
 */
-#define ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */
+#define ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

 /*
- * FLAG: counters overflow flag status reg
+ * Event filters for PMUv2
 */
-#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
-#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
-#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
+#define	ARMV7_EXCLUDE_PL1	(1 << 31)
+#define	ARMV7_EXCLUDE_USER	(1 << 30)
+#define	ARMV7_INCLUDE_HYP	(1 << 27)

-static inline unsigned long armv7_pmnc_read(void)
+static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
 	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 	return val;
 }

-static inline void armv7_pmnc_write(unsigned long val)
+static inline void armv7_pmnc_write(u32 val)
 {
 	val &= ARMV7_PMNC_MASK;
 	isb();
 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }

-static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 {
 	return pmnc & ARMV7_OVERFLOWED_MASK;
 }

-static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
-					enum armv7_counters counter)
+static inline int armv7_pmnc_counter_valid(int idx)
+{
+	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 {
 	int ret = 0;
+	u32 counter;

-	if (counter == ARMV7_CYCLE_COUNTER)
-		ret = pmnc & ARMV7_FLAG_C;
-	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
-		ret = pmnc & ARMV7_FLAG_P(counter);
-	else
+	if (!armv7_pmnc_counter_valid(idx)) {
 		pr_err("CPU%u checking wrong counter %d overflow status\n",
-			smp_processor_id(), counter);
+			smp_processor_id(), idx);
+	} else {
+		counter = ARMV7_IDX_TO_COUNTER(idx);
+		ret = pmnc & BIT(counter);
+	}

 	return ret;
 }

-static inline int armv7_pmnc_select_counter(unsigned int idx)
+static inline int armv7_pmnc_select_counter(int idx)
 {
-	u32 val;
+	u32 counter;

-	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
-		pr_err("CPU%u selecting wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
 	}

-	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 	isb();

 	return idx;

@@ -812,124 +789,95 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
 static inline u32 armv7pmu_read_counter(int idx)
 {
-	unsigned long value = 0;
+	u32 value = 0;

-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mrc p15, 0, %0, c9, c13, 2"
-				     : "=r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

 	return value;
 }

 static inline void armv7pmu_write_counter(int idx, u32 value)
 {
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mcr p15, 0, %0, c9, c13, 2"
-				     : : "r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 }

-static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 {
 	if (armv7_pmnc_select_counter(idx) == idx) {
-		val &= ARMV7_EVTSEL_MASK;
+		val &= ARMV7_EVTYPE_MASK;
 		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 	}
 }

-static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+static inline int armv7_pmnc_enable_counter(int idx)
 {
-	u32 val;
+	u32 counter;

-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
 	}

-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENS_C;
-	else
-		val = ARMV7_CNTENS_P(idx);
-	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 	return idx;
 }

-static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+static inline int armv7_pmnc_disable_counter(int idx)
 {
-	u32 val;
+	u32 counter;

-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
 	}

-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENC_C;
-	else
-		val = ARMV7_CNTENC_P(idx);
-	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 	return idx;
 }

-static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+static inline int armv7_pmnc_enable_intens(int idx)
 {
-	u32 val;
+	u32 counter;

-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
 	}

-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENS_C;
-	else
-		val = ARMV7_INTENS_P(idx);
-	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 	return idx;
 }

-static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+static inline int armv7_pmnc_disable_intens(int idx)
 {
-	u32 val;
+	u32 counter;

-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
 	}

-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENC_C;
-	else
-		val = ARMV7_INTENC_P(idx);
-	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 	return idx;
 }

@@ -973,14 +921,14 @@ static void armv7_pmnc_dump_regs(void)
 	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 	printk(KERN_INFO "CCNT  =0x%08x\n", val);

-	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
+	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
 		armv7_pmnc_select_counter(cnt);
 		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
-			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
 		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
-			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
 	}
 }
 #endif

@@ -988,12 +936,13 @@ static void armv7_pmnc_dump_regs(void)
 static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	/*
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);

 	/*
 	 * Disable counter

@@ -1002,9 +951,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	/*
 	 * Set event (if destined for PMNx counters)
-	 * We don't need to set the event if it's a cycle count
+	 * We only need to set the event for the cycle counter if we
+	 * have the ability to perform event filtering.
 	 */
-	if (idx != ARMV7_CYCLE_COUNTER)
+	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 		armv7_pmnc_write_evtsel(idx, hwc->config_base);

 	/*

@@ -1017,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_enable_counter(idx);

-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	/*
 	 * Disable counter and interrupt
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);

 	/*
 	 * Disable counter

@@ -1039,14 +990,14 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_disable_intens(idx);

-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 {
-	unsigned long pmnc;
+	u32 pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;

@@ -1069,13 +1020,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);

 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.

@@ -1090,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;

 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}

 	/*

@@ -1108,61 +1056,114 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 static void armv7pmu_start(void)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

-static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct hw_perf_event *event)
 {
 	int idx;
+	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;

 	/* Always place a cycle counter into the cycle counter. */
-	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
-		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
+	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
+		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;

-		return ARMV7_CYCLE_COUNTER;
-	} else {
-		/*
-		 * For anything other than a cycle counter, try and use
-		 * the events counters
-		 */
-		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
-			if (!test_and_set_bit(idx, cpuc->used_mask))
-				return idx;
-		}
+		return ARMV7_IDX_CYCLE_COUNTER;
+	}

-		/* The counters are all in use. */
-		return -EAGAIN;
+	/*
+	 * For anything other than a cycle counter, try and use
+	 * the events counters
+	 */
+	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
 	}
+
+	/* The counters are all in use. */
+	return -EAGAIN;
+}
+
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv7pmu_set_event_filter(struct hw_perf_event *event,
+				     struct perf_event_attr *attr)
+{
+	unsigned long config_base = 0;
+
+	if (attr->exclude_idle)
+		return -EPERM;
+	if (attr->exclude_user)
+		config_base |= ARMV7_EXCLUDE_USER;
+	if (attr->exclude_kernel)
+		config_base |= ARMV7_EXCLUDE_PL1;
+	if (!attr->exclude_hv)
+		config_base |= ARMV7_INCLUDE_HYP;
+
+	/*
+	 * Install the filter into config_base as this is used to
+	 * construct the event type.
+	 */
+	event->config_base = config_base;
+
+	return 0;
 }

 static void armv7pmu_reset(void *info)
 {
-	u32 idx, nb_cnt = armpmu->num_events;
+	u32 idx, nb_cnt = cpu_pmu->num_events;

 	/* The counter and interrupt enable registers are unknown at reset. */
-	for (idx = 1; idx < nb_cnt; ++idx)
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
 		armv7pmu_disable_event(NULL, idx);

 	/* Initialize & Reset PMNC: C and P bits */
 	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
 }

+static int armv7_a8_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a8_perf_map,
+			     &armv7_a8_perf_cache_map, 0xFF);
+}
+
+static int armv7_a9_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a9_perf_map,
+			     &armv7_a9_perf_cache_map, 0xFF);
+}
+
+static int armv7_a5_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a5_perf_map,
+			     &armv7_a5_perf_cache_map, 0xFF);
+}
+
+static int armv7_a15_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a15_perf_map,
+			     &armv7_a15_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu armv7pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 	.enable			= armv7pmu_enable_event,

@@ -1173,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
 	.reset			= armv7pmu_reset,
-	.raw_event_mask		= 0xFF,
 	.max_period		= (1LLU << 32) - 1,
 };

@@ -1188,62 +1188,59 @@ static u32 __init armv7_read_num_pmnc_events(void)
 	return nb_cnt + 1;
 }

-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name		= "ARMv7 Cortex-A8";
-	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a8_perf_map;
+	armv7pmu.map_event	= armv7_a8_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }

-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name		= "ARMv7 Cortex-A9";
-	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a9_perf_map;
+	armv7pmu.map_event	= armv7_a9_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }

-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name		= "ARMv7 Cortex-A5";
-	armv7pmu.cache_map	= &armv7_a5_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a5_perf_map;
+	armv7pmu.map_event	= armv7_a5_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }

-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name		= "ARMv7 Cortex-A15";
-	armv7pmu.cache_map	= &armv7_a15_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a15_perf_map;
+	armv7pmu.map_event	= armv7_a15_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
+	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
 	return &armv7pmu;
 }
 #else
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	return NULL;
 }
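A worked example of the new ARMV7_IDX_TO_COUNTER() mapping may help; the values below follow directly from the macros in the v7 diff above and are not part of the commit itself:

	/*
	 * ARMV7_IDX_TO_COUNTER(x) == ((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK
	 *                         == (x - 1) & 31
	 *
	 *   ARMV7_IDX_COUNTER0     (1) -> hardware counter 0 (first event counter)
	 *   ARMV7_IDX_COUNTER0 + 1 (2) -> hardware counter 1
	 *   ARMV7_IDX_CYCLE_COUNTER(0) -> (0 - 1) & 31 = 31, so BIT(counter) is bit 31,
	 *       the same cycle-counter bit that the removed ARMV7_CNTENS_C /
	 *       ARMV7_FLAG_C macros (1 << ARMV7_CCNT, with ARMV7_CCNT == 31) encoded.
	 */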
arch/arm/kernel/perf_event_xscale.c
View file @
4fb0d2ea
...
...
@@ -40,7 +40,7 @@ enum xscale_perf_types {
};
enum
xscale_counters
{
XSCALE_CYCLE_COUNTER
=
1
,
XSCALE_CYCLE_COUNTER
=
0
,
XSCALE_COUNTER0
,
XSCALE_COUNTER1
,
XSCALE_COUNTER2
,
...
...
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
{
unsigned
long
pmnc
;
struct
perf_sample_data
data
;
struct
cp
u_hw_events
*
cpuc
;
struct
pm
u_hw_events
*
cpuc
;
struct
pt_regs
*
regs
;
int
idx
;
...
...
@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
perf_sample_data_init
(
&
data
,
0
);
cpuc
=
&
__get_cpu_var
(
cpu_hw_events
);
for
(
idx
=
0
;
idx
<
=
arm
pmu
->
num_events
;
++
idx
)
{
for
(
idx
=
0
;
idx
<
cpu_
pmu
->
num_events
;
++
idx
)
{
struct
perf_event
*
event
=
cpuc
->
events
[
idx
];
struct
hw_perf_event
*
hwc
;
if
(
!
test_bit
(
idx
,
cpuc
->
active_mask
))
continue
;
if
(
!
xscale1_pmnc_counter_has_overflowed
(
pmnc
,
idx
))
continue
;
...
...
@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
continue
;
if
(
perf_event_overflow
(
event
,
&
data
,
regs
))
arm
pmu
->
disable
(
hwc
,
idx
);
cpu_
pmu
->
disable
(
hwc
,
idx
);
}
irq_work_run
();
...
...
@@ -284,6 +281,7 @@ static void
xscale1pmu_enable_event
(
struct
hw_perf_event
*
hwc
,
int
idx
)
{
unsigned
long
val
,
mask
,
evt
,
flags
;
struct
pmu_hw_events
*
events
=
cpu_pmu
->
get_hw_events
();
switch
(
idx
)
{
case
XSCALE_CYCLE_COUNTER
:
...
...
@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
return
;
}
raw_spin_lock_irqsave
(
&
pmu_lock
,
flags
);
raw_spin_lock_irqsave
(
&
events
->
pmu_lock
,
flags
);
val
=
xscale1pmu_read_pmnc
();
val
&=
~
mask
;
val
|=
evt
;
xscale1pmu_write_pmnc
(
val
);
raw_spin_unlock_irqrestore
(
&
pmu_lock
,
flags
);
raw_spin_unlock_irqrestore
(
&
events
->
pmu_lock
,
flags
);
}
static
void
xscale1pmu_disable_event
(
struct
hw_perf_event
*
hwc
,
int
idx
)
{
unsigned
long
val
,
mask
,
evt
,
flags
;
struct
pmu_hw_events
*
events
=
cpu_pmu
->
get_hw_events
();
switch
(
idx
)
{
case
XSCALE_CYCLE_COUNTER
:
...
...
@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
return
;
}
raw_spin_lock_irqsave
(
&
pmu_lock
,
flags
);
raw_spin_lock_irqsave
(
&
events
->
pmu_lock
,
flags
);
val
=
xscale1pmu_read_pmnc
();
val
&=
~
mask
;
val
|=
evt
;
xscale1pmu_write_pmnc
(
val
);
raw_spin_unlock_irqrestore
(
&
pmu_lock
,
flags
);
raw_spin_unlock_irqrestore
(
&
events
->
pmu_lock
,
flags
);
}
static
int
xscale1pmu_get_event_idx
(
struct
cp
u_hw_events
*
cpuc
,
xscale1pmu_get_event_idx
(
struct
pm
u_hw_events
*
cpuc
,
struct
hw_perf_event
*
event
)
{
if
(
XSCALE_PERFCTR_CCNT
==
event
->
config_base
)
{
...
...
@@ -368,24 +367,26 @@ static void
xscale1pmu_start(void)
{
    unsigned long flags, val;
    struct pmu_hw_events *events = cpu_pmu->get_hw_events();
    raw_spin_lock_irqsave(&pmu_lock, flags);
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    val = xscale1pmu_read_pmnc();
    val |= XSCALE_PMU_ENABLE;
    xscale1pmu_write_pmnc(val);
    raw_spin_unlock_irqrestore(&pmu_lock, flags);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
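Throughout this file the change is the same: the control register is updated with a read-modify-write sequence, and the spinlock protecting that sequence moves from the file-global pmu_lock to the per-PMU lock held in the pmu_hw_events structure. A rough user-space analogue of the pattern, using a pthread mutex in place of the raw spinlock (all names below are illustrative):

#include <pthread.h>
#include <stdio.h>

/* Per-PMU state: the lock travels with the data it protects. */
struct fake_pmu {
    pthread_mutex_t lock;
    unsigned long ctrl;          /* stand-in for the PMNC control register */
};

#define PMU_ENABLE 0x1UL

static void pmu_start(struct fake_pmu *pmu)
{
    pthread_mutex_lock(&pmu->lock);
    pmu->ctrl |= PMU_ENABLE;     /* read-modify-write under the lock */
    pthread_mutex_unlock(&pmu->lock);
}

static void pmu_stop(struct fake_pmu *pmu)
{
    pthread_mutex_lock(&pmu->lock);
    pmu->ctrl &= ~PMU_ENABLE;
    pthread_mutex_unlock(&pmu->lock);
}

int main(void)
{
    struct fake_pmu pmu = { .lock = PTHREAD_MUTEX_INITIALIZER, .ctrl = 0 };

    pmu_start(&pmu);
    printf("ctrl after start: %#lx\n", pmu.ctrl);
    pmu_stop(&pmu);
    printf("ctrl after stop:  %#lx\n", pmu.ctrl);
    return 0;
}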
static void
xscale1pmu_stop(void)
{
    unsigned long flags, val;
    struct pmu_hw_events *events = cpu_pmu->get_hw_events();
    raw_spin_lock_irqsave(&pmu_lock, flags);
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    val = xscale1pmu_read_pmnc();
    val &= ~XSCALE_PMU_ENABLE;
    xscale1pmu_write_pmnc(val);
    raw_spin_unlock_irqrestore(&pmu_lock, flags);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static inline u32
...
...
@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
}
}
static const struct arm_pmu xscale1pmu = {
static int xscale_map_event(struct perf_event *event)
{
    return map_cpu_event(event, &xscale_perf_map, &xscale_perf_cache_map, 0xFF);
}
static struct arm_pmu xscale1pmu = {
    .id             = ARM_PERF_PMU_ID_XSCALE1,
    .name           = "xscale1",
    .handle_irq     = xscale1pmu_handle_irq,
...
...
@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
    .get_event_idx  = xscale1pmu_get_event_idx,
    .start          = xscale1pmu_start,
    .stop           = xscale1pmu_stop,
    .cache_map      = &xscale_perf_cache_map,
    .event_map      = &xscale_perf_map,
    .raw_event_mask = 0xFF,
    .map_event      = xscale_map_event,
    .num_events     = 3,
    .max_period     = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init xscale1pmu_init(void)
static struct arm_pmu *__init xscale1pmu_init(void)
{
    return &xscale1pmu;
}
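The cache_map/event_map/raw_event_mask pointers in the struct are replaced by a single map_event callback, which performs the table lookups itself via map_cpu_event(). A simplified standalone sketch of that kind of mapping function (the tables and constants below are invented for illustration, not the real perf maps):

#include <stdio.h>

enum generic_event { EV_CYCLES, EV_INSTRUCTIONS, EV_MAX };

/* Per-PMU table: generic event -> hardware event code. */
static const int hw_event_map[EV_MAX] = {
    [EV_CYCLES]       = 0x11,
    [EV_INSTRUCTIONS] = 0x08,
};

#define RAW_EVENT_MASK 0xFF

/* One callback replaces several table pointers: it owns the lookup policy. */
static int map_event(int type, unsigned long config)
{
    if (type == 0)                             /* "generic" event */
        return config < EV_MAX ? hw_event_map[config] : -1;
    return (int)(config & RAW_EVENT_MASK);     /* "raw" event */
}

int main(void)
{
    printf("cycles    -> %#x\n", map_event(0, EV_CYCLES));
    printf("raw 0x142 -> %#x\n", map_event(1, 0x142));
    return 0;
}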
...
...
@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
{
    unsigned long pmnc, of_flags;
    struct perf_sample_data data;
    struct cpu_hw_events *cpuc;
    struct pmu_hw_events *cpuc;
    struct pt_regs *regs;
    int idx;
...
...
@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
    perf_sample_data_init(&data, 0);
    cpuc = &__get_cpu_var(cpu_hw_events);
    for (idx = 0; idx <= armpmu->num_events; ++idx) {
    for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
        struct perf_event *event = cpuc->events[idx];
        struct hw_perf_event *hwc;
        if (!test_bit(idx, cpuc->active_mask))
            continue;
        if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
            continue;
...
...
@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
            continue;
        if (perf_event_overflow(event, &data, regs))
            armpmu->disable(hwc, idx);
            cpu_pmu->disable(hwc, idx);
    }
    irq_work_run();
...
...
@@ -616,6 +618,7 @@ static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
    unsigned long flags, ien, evtsel;
    struct pmu_hw_events *events = cpu_pmu->get_hw_events();
    ien = xscale2pmu_read_int_enable();
    evtsel = xscale2pmu_read_event_select();
...
...
@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
        return;
    }
    raw_spin_lock_irqsave(&pmu_lock, flags);
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    xscale2pmu_write_event_select(evtsel);
    xscale2pmu_write_int_enable(ien);
    raw_spin_unlock_irqrestore(&pmu_lock, flags);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
    unsigned long flags, ien, evtsel;
    struct pmu_hw_events *events = cpu_pmu->get_hw_events();
    ien = xscale2pmu_read_int_enable();
    evtsel = xscale2pmu_read_event_select();
...
...
@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
        return;
    }
    raw_spin_lock_irqsave(&pmu_lock, flags);
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    xscale2pmu_write_event_select(evtsel);
    xscale2pmu_write_int_enable(ien);
    raw_spin_unlock_irqrestore(&pmu_lock, flags);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
                         struct hw_perf_event *event)
{
    int idx = xscale1pmu_get_event_idx(cpuc, event);
...
...
@@ -718,24 +722,26 @@ static void
xscale2pmu_start(void)
{
    unsigned long flags, val;
    struct pmu_hw_events *events = cpu_pmu->get_hw_events();
    raw_spin_lock_irqsave(&pmu_lock, flags);
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
    val |= XSCALE_PMU_ENABLE;
    xscale2pmu_write_pmnc(val);
    raw_spin_unlock_irqrestore(&pmu_lock, flags);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
xscale2pmu_stop(void)
{
    unsigned long flags, val;
    struct pmu_hw_events *events = cpu_pmu->get_hw_events();
    raw_spin_lock_irqsave(&pmu_lock, flags);
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    val = xscale2pmu_read_pmnc();
    val &= ~XSCALE_PMU_ENABLE;
    xscale2pmu_write_pmnc(val);
    raw_spin_unlock_irqrestore(&pmu_lock, flags);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static inline u32
...
...
@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
}
}
static const struct arm_pmu xscale2pmu = {
static struct arm_pmu xscale2pmu = {
    .id             = ARM_PERF_PMU_ID_XSCALE2,
    .name           = "xscale2",
    .handle_irq     = xscale2pmu_handle_irq,
...
...
@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
    .get_event_idx  = xscale2pmu_get_event_idx,
    .start          = xscale2pmu_start,
    .stop           = xscale2pmu_stop,
    .cache_map      = &xscale_perf_cache_map,
    .event_map      = &xscale_perf_map,
    .raw_event_mask = 0xFF,
    .map_event      = xscale_map_event,
    .num_events     = 5,
    .max_period     = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init xscale2pmu_init(void)
static struct arm_pmu *__init xscale2pmu_init(void)
{
    return &xscale2pmu;
}
#else
static const struct arm_pmu *__init xscale1pmu_init(void)
static struct arm_pmu *__init xscale1pmu_init(void)
{
    return NULL;
}
static const struct arm_pmu *__init xscale2pmu_init(void)
static struct arm_pmu *__init xscale2pmu_init(void)
{
    return NULL;
}
...
...
arch/arm/kernel/pmu.c
...
...
@@ -10,192 +10,26 @@
*
*/
#define pr_fmt(fmt) "PMU: " fmt
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>
static volatile long pmu_lock;
static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
static int __devinit pmu_register(struct platform_device *pdev,
                                  enum arm_pmu_type type)
{
    if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
        pr_warning("received registration request for unknown "
                   "PMU device type %d\n", type);
        return -EINVAL;
    }
    if (pmu_devices[type]) {
        pr_warning("rejecting duplicate registration of PMU device "
                   "type %d.", type);
        return -ENOSPC;
    }
    pr_info("registered new PMU device of type %d\n", type);
    pmu_devices[type] = pdev;
    return 0;
}
#define OF_MATCH_PMU(_name, _type) { \
.compatible = _name, \
.data = (void *)_type, \
}
#define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
static struct of_device_id armpmu_of_device_ids[] = {
    OF_MATCH_CPU("arm,cortex-a9-pmu"),
    OF_MATCH_CPU("arm,cortex-a8-pmu"),
    OF_MATCH_CPU("arm,arm1136-pmu"),
    OF_MATCH_CPU("arm,arm1176-pmu"),
    {},
};
#define PLAT_MATCH_PMU(_name, _type) { \
.name = _name, \
.driver_data = _type, \
}
#define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
static struct platform_device_id armpmu_plat_device_ids[] = {
    PLAT_MATCH_CPU("arm-pmu"),
    {},
};
enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
{
    const struct of_device_id *of_id;
    const struct platform_device_id *pdev_id;
    /* provided by of_device_id table */
    if (pdev->dev.of_node) {
        of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
        BUG_ON(!of_id);
        return (enum arm_pmu_type)of_id->data;
    }
    /* Provided by platform_device_id table */
    pdev_id = platform_get_device_id(pdev);
    BUG_ON(!pdev_id);
    return pdev_id->driver_data;
}
static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
    return pmu_register(pdev, armpmu_device_type(pdev));
}
static struct platform_driver armpmu_driver = {
    .driver         = {
        .name           = "arm-pmu",
        .of_match_table = armpmu_of_device_ids,
    },
    .probe          = armpmu_device_probe,
    .id_table       = armpmu_plat_device_ids,
};
static int __init register_pmu_driver(void)
{
    return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);
/*
* PMU locking to ensure mutual exclusion between different subsystems.
*/
static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];
struct platform_device *
int
reserve_pmu(enum arm_pmu_type type)
{
    struct platform_device *pdev;
    if (test_and_set_bit_lock(type, &pmu_lock)) {
        pdev = ERR_PTR(-EBUSY);
    } else if (pmu_devices[type] == NULL) {
        clear_bit_unlock(type, &pmu_lock);
        pdev = ERR_PTR(-ENODEV);
    } else {
        pdev = pmu_devices[type];
    }
    return pdev;
    return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(reserve_pmu);
int
void
release_pmu(enum arm_pmu_type type)
{
    if (WARN_ON(!pmu_devices[type]))
        return -EINVAL;
    clear_bit_unlock(type, &pmu_lock);
    return 0;
}
EXPORT_SYMBOL_GPL(release_pmu);
static int
set_irq_affinity(int irq, unsigned int cpu)
{
#ifdef CONFIG_SMP
    int err = irq_set_affinity(irq, cpumask_of(cpu));
    if (err)
        pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
                   irq, cpu);
    return err;
#else
    return -EINVAL;
#endif
}
static int
init_cpu_pmu(void)
{
    int i, irqs, err = 0;
    struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
    if (!pdev)
        return -ENODEV;
    irqs = pdev->num_resources;
    /*
     * If we have a single PMU interrupt that we can't shift, assume that
     * we're running on a uniprocessor machine and continue.
     */
    if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
        return 0;
    for (i = 0; i < irqs; ++i) {
        err = set_irq_affinity(platform_get_irq(pdev, i), i);
        if (err)
            break;
    }
    return err;
}
int
init_pmu(enum arm_pmu_type type)
{
    int err = 0;
    switch (type) {
    case ARM_PMU_DEVICE_CPU:
        err = init_cpu_pmu();
        break;
    default:
        pr_warning("attempt to initialise PMU of unknown "
                   "type %d\n", type);
        err = -EINVAL;
    }
    return err;
    clear_bit_unlock(type, pmu_lock);
}
EXPORT_SYMBOL_GPL(init_pmu);
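The net effect of this hunk is that pmu.c stops tracking platform devices: reserving a PMU reduces to an atomic test-and-set on a per-type bit, and releasing it clears that bit. A standalone sketch of that reservation idea, using C11 atomic flags in place of the kernel's bitops (the types and names are illustrative only):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

enum pmu_type { PMU_CPU = 0, PMU_TYPES };

/* One "in use" flag per PMU type; clear = free, set = reserved. */
static atomic_flag pmu_in_use[PMU_TYPES] = { ATOMIC_FLAG_INIT };

static int reserve_pmu(enum pmu_type type)
{
    /* Atomically set the flag; fail if it was already set. */
    return atomic_flag_test_and_set(&pmu_in_use[type]) ? -EBUSY : 0;
}

static void release_pmu(enum pmu_type type)
{
    atomic_flag_clear(&pmu_in_use[type]);
}

int main(void)
{
    printf("first reserve:  %d\n", reserve_pmu(PMU_CPU));   /* 0 */
    printf("second reserve: %d\n", reserve_pmu(PMU_CPU));   /* -EBUSY */
    release_pmu(PMU_CPU);
    printf("after release:  %d\n", reserve_pmu(PMU_CPU));   /* 0 */
    return 0;
}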
kernel/events/core.c
...
...
@@ -5715,6 +5715,7 @@ struct pmu *perf_init_event(struct perf_event *event)
    pmu = idr_find(&pmu_idr, event->attr.type);
    rcu_read_unlock();
    if (pmu) {
        event->pmu = pmu;
        ret = pmu->event_init(event);
        if (ret)
            pmu = ERR_PTR(ret);
...
...
@@ -5722,6 +5723,7 @@ struct pmu *perf_init_event(struct perf_event *event)
    }
    list_for_each_entry_rcu(pmu, &pmus, entry) {
        event->pmu = pmu;
        ret = pmu->event_init(event);
        if (!ret)
            goto unlock;
...
...
@@ -5848,8 +5850,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        return ERR_PTR(err);
    }
    event->pmu = pmu;
    if (!event->parent) {
        if (event->attach_state & PERF_ATTACH_TASK)
            jump_label_inc(&perf_sched_events);
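These core.c hunks move the event->pmu assignment into perf_init_event(), before pmu->event_init() runs, instead of setting it afterwards in perf_event_alloc(); that way an event_init implementation can already see which struct pmu the event belongs to. A minimal illustration of why the ordering matters (the structures below are invented for the sketch, not the perf core types):

#include <stdio.h>

struct pmu;

struct event {
    struct pmu *pmu;
    int config;
};

struct pmu {
    const char *name;
    int (*event_init)(struct event *ev);
};

/* An init callback that needs to know its own PMU, e.g. to validate config. */
static int my_event_init(struct event *ev)
{
    if (!ev->pmu) {
        fprintf(stderr, "event_init called before event->pmu was set\n");
        return -1;
    }
    printf("initialising event for PMU '%s'\n", ev->pmu->name);
    return 0;
}

int main(void)
{
    struct pmu cpu_pmu = { .name = "cpu", .event_init = my_event_init };
    struct event ev = { .config = 0x11 };

    ev.pmu = &cpu_pmu;               /* assign first ... */
    return cpu_pmu.event_init(&ev);  /* ... then let the PMU initialise it */
}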
...
...