Commit 4fb0d2ea, authored Aug 31, 2011 by Will Deacon

    Merge branches 'hwbreak', 'perf/updates' and 'perf/system-pmus' into for-rmk

Parents: d1244336, 7325eaec
Showing 7 changed files with 640 additions and 686 deletions (+640, -686):
arch/arm/include/asm/pmu.h           +74   -19
arch/arm/kernel/perf_event.c         +254  -221
arch/arm/kernel/perf_event_v6.c      +59   -28
arch/arm/kernel/perf_event_v7.c      +196  -199
arch/arm/kernel/perf_event_xscale.c  +47   -43
arch/arm/kernel/pmu.c                +8    -174
kernel/events/core.c                 +2    -2
arch/arm/include/asm/pmu.h

@@ -13,7 +13,12 @@
 #define __ARM_PMU_H__

 #include <linux/interrupt.h>
 #include <linux/perf_event.h>

+/*
+ * Types of PMUs that can be accessed directly and require mutual
+ * exclusion between profiling tools.
+ */
 enum arm_pmu_type {
 	ARM_PMU_DEVICE_CPU = 0,
 	ARM_NUM_PMU_DEVICES,

@@ -37,21 +42,17 @@ struct arm_pmu_platdata {
  * reserve_pmu() - reserve the hardware performance counters
  *
  * Reserve the hardware performance counters in the system for exclusive use.
- * The platform_device for the system is returned on success, ERR_PTR()
- * encoded error on failure.
+ * Returns 0 on success or -EBUSY if the lock is already held.
  */
-extern struct platform_device *
+extern int
 reserve_pmu(enum arm_pmu_type type);

 /**
  * release_pmu() - Relinquish control of the performance counters
  *
  * Release the performance counters and allow someone else to use them.
- * Callers must have disabled the counters and released IRQs before calling
- * this. The platform_device returned from reserve_pmu() must be passed as
- * a cookie.
  */
-extern int
+extern void
 release_pmu(enum arm_pmu_type type);
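The new contract is simpler: reserve_pmu() hands back an error code instead of a platform_device cookie, and release_pmu() can no longer fail. A minimal sketch of a client of the new API (the caller functions are hypothetical, assuming only <asm/pmu.h> as changed by this commit):

	#include <asm/pmu.h>

	/* Hypothetical client: grab exclusive use of the CPU counters. */
	static int my_profiler_start(void)
	{
		int err = reserve_pmu(ARM_PMU_DEVICE_CPU);
		if (err)
			return err;	/* -EBUSY: another tool owns the PMU */

		/* ... request IRQs, program and start the counters ... */
		return 0;
	}

	static void my_profiler_stop(void)
	{
		/* ... stop the counters and free the IRQs first ... */
		release_pmu(ARM_PMU_DEVICE_CPU);	/* void: cannot fail */
	}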
@@ -68,24 +69,78 @@ init_pmu(enum arm_pmu_type type);
 #include <linux/err.h>

-static inline struct platform_device *
-reserve_pmu(enum arm_pmu_type type)
-{
-	return ERR_PTR(-ENODEV);
-}
-
 static inline int
-release_pmu(enum arm_pmu_type type)
+reserve_pmu(enum arm_pmu_type type)
 {
 	return -ENODEV;
 }

-static inline int
-init_pmu(enum arm_pmu_type type)
-{
-	return -ENODEV;
-}
+static inline void
+release_pmu(enum arm_pmu_type type)	{ }

 #endif /* CONFIG_CPU_HAS_PMU */

 #ifdef CONFIG_HW_PERF_EVENTS
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+	/*
+	 * The events that are active on the PMU for the given index.
+	 */
+	struct perf_event	**events;
+
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	unsigned long		*used_mask;
+
+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t		pmu_lock;
+};
+
+struct arm_pmu {
+	struct pmu		pmu;
+	enum arm_perf_pmu_ids	id;
+	enum arm_pmu_type	type;
+	cpumask_t		active_irqs;
+	const char		*name;
+	irqreturn_t		(*handle_irq)(int irq_num, void *dev);
+	void			(*enable)(struct hw_perf_event *evt, int idx);
+	void			(*disable)(struct hw_perf_event *evt, int idx);
+	int			(*get_event_idx)(struct pmu_hw_events *hw_events,
+						 struct hw_perf_event *hwc);
+	int			(*set_event_filter)(struct hw_perf_event *evt,
+						    struct perf_event_attr *attr);
+	u32			(*read_counter)(int idx);
+	void			(*write_counter)(int idx, u32 val);
+	void			(*start)(void);
+	void			(*stop)(void);
+	void			(*reset)(void *);
+	int			(*map_event)(struct perf_event *event);
+	int			num_events;
+	atomic_t		active_events;
+	struct mutex		reserve_mutex;
+	u64			max_period;
+	struct platform_device	*plat_device;
+	struct pmu_hw_events	*(*get_hw_events)(void);
+};
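The used_mask comment above describes a simple bitmap allocator: a set bit claims a counter, a clear bit means it is free. As an illustration (not code from this commit; example_get_event_idx and example_num_counters are hypothetical), a get_event_idx implementation can claim a counter with an atomic test-and-set on that bitmap:

	/* Illustrative only: claim the first free counter in hw_events. */
	static int example_get_event_idx(struct pmu_hw_events *hw_events,
					 struct hw_perf_event *hwc)
	{
		int idx;

		for (idx = 0; idx < example_num_counters; ++idx) {
			/* A 0 bit means the counter is free; setting it claims it. */
			if (!test_and_set_bit(idx, hw_events->used_mask))
				return idx;
		}

		return -EAGAIN;	/* all counters busy */
	}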
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+
+u64 armpmu_event_update(struct perf_event *event,
+			struct hw_perf_event *hwc,
+			int idx, int overflow);
+
+int armpmu_event_set_period(struct perf_event *event,
+			    struct hw_perf_event *hwc,
+			    int idx);
+
 #endif /* CONFIG_HW_PERF_EVENTS */
 #endif /* __ARM_PMU_H__ */
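to_arm_pmu() is the usual container_of() downcast: the perf core hands callbacks a struct pmu *, and the ARM backend recovers the struct arm_pmu that embeds it. A hedged sketch of how a callback might use it (example_pmu_enable is hypothetical, not a function from this commit):

	/* Illustrative callback: recover the ARM-specific PMU from a struct pmu. */
	static void example_pmu_enable(struct pmu *pmu)
	{
		struct arm_pmu *armpmu = to_arm_pmu(pmu);

		/* The backend-specific ops and data are now reachable. */
		if (armpmu->start)
			armpmu->start();
	}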
arch/arm/kernel/perf_event.c

(This diff is collapsed.)
arch/arm/kernel/perf_event_v6.c

@@ -54,7 +54,7 @@ enum armv6_perf_types {
 };

 enum armv6_counters {
-	ARMV6_CYCLE_COUNTER = 1,
+	ARMV6_CYCLE_COUNTER = 0,
 	ARMV6_COUNTER0,
 	ARMV6_COUNTER1,
 };

@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 		      int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= 0;

@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

+static int counter_is_active(unsigned long pmcr, int idx)
+{
+	unsigned long mask = 0;
+	if (idx == ARMV6_CYCLE_COUNTER)
+		mask = ARMV6_PMCR_CCOUNT_IEN;
+	else if (idx == ARMV6_COUNTER0)
+		mask = ARMV6_PMCR_COUNT0_IEN;
+	else if (idx == ARMV6_COUNTER1)
+		mask = ARMV6_PMCR_COUNT1_IEN;
+
+	if (mask)
+		return pmcr & mask;
+
+	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+	return 0;
+}
+
 static irqreturn_t

@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;

@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
 	perf_sample_data_init(&data, 0);

 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
+		if (!counter_is_active(pmcr, idx))
 			continue;

 		/*

@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;

 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}

 	/*

@@ -527,28 +545,30 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 armv6pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
 		       struct hw_perf_event *event)
 {
 	/* Always place a cycle counter into the cycle counter. */

@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 		       int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;

@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void

@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 			      int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;

@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

+static int armv6_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6_perf_map,
+				&armv6_perf_cache_map, 0xFF);
+}
+
-static const struct arm_pmu armv6pmu = {
+static struct arm_pmu armv6pmu = {
 	.id			= ARM_PERF_PMU_ID_V6,
 	.name			= "v6",
 	.handle_irq		= armv6pmu_handle_irq,

@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6_perf_cache_map,
-	.event_map		= &armv6_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return &armv6pmu;
 }

@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void)
  * disable the interrupt reporting and update the event. When unthrottling we
  * reset the period and enable the interrupt reporting.
  */
-static const struct arm_pmu armv6mpcore_pmu = {
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6mpcore_perf_map,
+				&armv6mpcore_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6mpcore_pmu = {
 	.id			= ARM_PERF_PMU_ID_V6MP,
 	.name			= "v6mpcore",
 	.handle_irq		= armv6pmu_handle_irq,

@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6mpcore_perf_cache_map,
-	.event_map		= &armv6mpcore_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6mpcore_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return &armv6mpcore_pmu;
 }
 #else
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }
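The repeated edit in this file replaces the old file-scope pmu_lock with the per-PMU events->pmu_lock around every read/modify/write of the shared PMCR control register. A minimal sketch of the pattern in isolation (example_pmcr_set_bits is a hypothetical helper, built only from names used in the diff above):

	/* Illustrative: serialize a PMCR read/modify/write on the PMU's own lock. */
	static void example_pmcr_set_bits(u32 set, u32 clear)
	{
		struct pmu_hw_events *events = cpu_pmu->get_hw_events();
		unsigned long flags;
		u32 val;

		/* Without the lock, two CPUs could interleave and lose an update. */
		raw_spin_lock_irqsave(&events->pmu_lock, flags);
		val = armv6_pmcr_read();
		val &= ~clear;
		val |= set;
		armv6_pmcr_write(val);
		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
	}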
arch/arm/kernel/perf_event_v7.c

(This diff is collapsed.)
arch/arm/kernel/perf_event_xscale.c

@@ -40,7 +40,7 @@ enum xscale_perf_types {
 };

 enum xscale_counters {
-	XSCALE_CYCLE_COUNTER	= 1,
+	XSCALE_CYCLE_COUNTER	= 0,
 	XSCALE_COUNTER0,
 	XSCALE_COUNTER1,
 	XSCALE_COUNTER2,

@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;

@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);

 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;

@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;

 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}

 	irq_work_run();

@@ -284,6 +281,7 @@ static void
 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:

@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:

@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			struct hw_perf_event *event)
 {
 	if (XSCALE_PERFCTR_CCNT == event->config_base) {

@@ -368,24 +367,26 @@ static void
 xscale1pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static inline u32

@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
 	}
 }

-static const struct arm_pmu xscale1pmu = {
+static int xscale_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &xscale_perf_map,
+				&xscale_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu xscale1pmu = {
 	.id		= ARM_PERF_PMU_ID_XSCALE1,
 	.name		= "xscale1",
 	.handle_irq	= xscale1pmu_handle_irq,

@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
 	.get_event_idx	= xscale1pmu_get_event_idx,
 	.start		= xscale1pmu_start,
 	.stop		= xscale1pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 3,
 	.max_period	= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return &xscale1pmu;
 }

@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;

@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);

 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;

@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;

 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}

 	irq_work_run();

@@ -616,6 +618,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();

@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();

@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			struct hw_perf_event *event)
 {
 	int idx = xscale1pmu_get_event_idx(cpuc, event);

@@ -718,24 +722,26 @@ static void
 xscale2pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static void
 xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

 static inline u32

@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
 	}
 }

-static const struct arm_pmu xscale2pmu = {
+static struct arm_pmu xscale2pmu = {
 	.id		= ARM_PERF_PMU_ID_XSCALE2,
 	.name		= "xscale2",
 	.handle_irq	= xscale2pmu_handle_irq,

@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
 	.get_event_idx	= xscale2pmu_get_event_idx,
 	.start		= xscale2pmu_start,
 	.stop		= xscale2pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 5,
 	.max_period	= (1LLU << 32) - 1,
 };

-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return &xscale2pmu;
 }
 #else
-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return NULL;
 }

-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return NULL;
 }
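Across the v6 and xscale backends, the per-PMU .cache_map, .event_map and .raw_event_mask fields are collapsed into a single .map_event callback built on map_cpu_event(). A sketch of such a callback for a hypothetical PMU, mirroring the xscale_map_event() added above (my_perf_map and my_perf_cache_map are placeholder table names):

	/* Illustrative: translate a generic perf event into this PMU's raw
	 * event number using its own mapping tables; 0xFF is the raw mask.
	 */
	static int my_pmu_map_event(struct perf_event *event)
	{
		return map_cpu_event(event, &my_perf_map,
					&my_perf_cache_map, 0xFF);
	}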
arch/arm/kernel/pmu.c

@@ -10,192 +10,26 @@
  *
  */

 #define pr_fmt(fmt) "PMU: " fmt

-#include <linux/cpumask.h>
 #include <linux/err.h>
-#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>

 #include <asm/pmu.h>

-static volatile long pmu_lock;
-
-static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
-
-static int __devinit pmu_register(struct platform_device *pdev,
-					enum arm_pmu_type type)
-{
-	if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
-		pr_warning("received registration request for unknown "
-			   "PMU device type %d\n", type);
-		return -EINVAL;
-	}
-
-	if (pmu_devices[type]) {
-		pr_warning("rejecting duplicate registration of PMU device "
-			   "type %d.", type);
-		return -ENOSPC;
-	}
-
-	pr_info("registered new PMU device of type %d\n", type);
-	pmu_devices[type] = pdev;
-	return 0;
-}
-
-#define OF_MATCH_PMU(_name, _type) { \
-	.compatible = _name, \
-	.data = (void *)_type, \
-}
-
-#define OF_MATCH_CPU(name)	OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
-
-static struct of_device_id armpmu_of_device_ids[] = {
-	OF_MATCH_CPU("arm,cortex-a9-pmu"),
-	OF_MATCH_CPU("arm,cortex-a8-pmu"),
-	OF_MATCH_CPU("arm,arm1136-pmu"),
-	OF_MATCH_CPU("arm,arm1176-pmu"),
-	{},
-};
-
-#define PLAT_MATCH_PMU(_name, _type) { \
-	.name		= _name, \
-	.driver_data	= _type, \
-}
-
-#define PLAT_MATCH_CPU(_name)	PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
-
-static struct platform_device_id armpmu_plat_device_ids[] = {
-	PLAT_MATCH_CPU("arm-pmu"),
-	{},
-};
-
-enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
-{
-	const struct of_device_id	*of_id;
-	const struct platform_device_id *pdev_id;
-
-	/* provided by of_device_id table */
-	if (pdev->dev.of_node) {
-		of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
-		BUG_ON(!of_id);
-		return (enum arm_pmu_type)of_id->data;
-	}
-
-	/* Provided by platform_device_id table */
-	pdev_id = platform_get_device_id(pdev);
-	BUG_ON(!pdev_id);
-	return pdev_id->driver_data;
-}
-
-static int __devinit armpmu_device_probe(struct platform_device *pdev)
-{
-	return pmu_register(pdev, armpmu_device_type(pdev));
-}
-
-static struct platform_driver armpmu_driver = {
-	.driver		= {
-		.name	= "arm-pmu",
-		.of_match_table = armpmu_of_device_ids,
-	},
-	.probe		= armpmu_device_probe,
-	.id_table	= armpmu_plat_device_ids,
-};
-
-static int __init register_pmu_driver(void)
-{
-	return platform_driver_register(&armpmu_driver);
-}
-device_initcall(register_pmu_driver);
+/*
+ * PMU locking to ensure mutual exclusion between different subsystems.
+ */
+static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];

-struct platform_device *
+int
 reserve_pmu(enum arm_pmu_type type)
 {
-	struct platform_device *pdev;
-
-	if (test_and_set_bit_lock(type, &pmu_lock)) {
-		pdev = ERR_PTR(-EBUSY);
-	} else if (pmu_devices[type] == NULL) {
-		clear_bit_unlock(type, &pmu_lock);
-		pdev = ERR_PTR(-ENODEV);
-	} else {
-		pdev = pmu_devices[type];
-	}
-
-	return pdev;
+	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
 }
 EXPORT_SYMBOL_GPL(reserve_pmu);

-int
+void
 release_pmu(enum arm_pmu_type type)
 {
-	if (WARN_ON(!pmu_devices[type]))
-		return -EINVAL;
-	clear_bit_unlock(type, &pmu_lock);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(release_pmu);
-
-static int
-set_irq_affinity(int irq,
-		 unsigned int cpu)
-{
-#ifdef CONFIG_SMP
-	int err = irq_set_affinity(irq, cpumask_of(cpu));
-	if (err)
-		pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-			   irq, cpu);
-	return err;
-#else
-	return -EINVAL;
-#endif
-}
-
-static int
-init_cpu_pmu(void)
-{
-	int i, irqs, err = 0;
-	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
-
-	if (!pdev)
-		return -ENODEV;
-
-	irqs = pdev->num_resources;
-
-	/*
-	 * If we have a single PMU interrupt that we can't shift, assume that
-	 * we're running on a uniprocessor machine and continue.
-	 */
-	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
-		return 0;
-
-	for (i = 0; i < irqs; ++i) {
-		err = set_irq_affinity(platform_get_irq(pdev, i), i);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-int
-init_pmu(enum arm_pmu_type type)
-{
-	int err = 0;
-
-	switch (type) {
-	case ARM_PMU_DEVICE_CPU:
-		err = init_cpu_pmu();
-		break;
-	default:
-		pr_warning("attempt to initialise PMU of unknown "
-			   "type %d\n", type);
-		err = -EINVAL;
-	}
-
-	return err;
+	clear_bit_unlock(type, pmu_lock);
 }
-EXPORT_SYMBOL_GPL(init_pmu);
+EXPORT_SYMBOL_GPL(release_pmu);
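With the device bookkeeping gone, reserve_pmu()/release_pmu() reduce to a textbook bit-lock over a per-type bitmap. The same pattern in isolation, for a generic resource bitmap (illustrative only; the example_* names and the resource count are hypothetical):

	#include <linux/bitops.h>

	static unsigned long example_lock[BITS_TO_LONGS(4)];	/* 4 resources */

	static int example_reserve(unsigned int nr)
	{
		/* test_and_set_bit_lock() returns the old bit value:
		 * non-zero means somebody already holds this resource.
		 */
		return test_and_set_bit_lock(nr, example_lock) ? -EBUSY : 0;
	}

	static void example_release(unsigned int nr)
	{
		/* Release with unlock semantics, pairing with the acquire above. */
		clear_bit_unlock(nr, example_lock);
	}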
kernel/events/core.c

@@ -5715,6 +5715,7 @@ struct pmu *perf_init_event(struct perf_event *event)
 	pmu = idr_find(&pmu_idr, event->attr.type);
 	rcu_read_unlock();
 	if (pmu) {
+		event->pmu = pmu;
 		ret = pmu->event_init(event);
 		if (ret)
 			pmu = ERR_PTR(ret);

@@ -5722,6 +5723,7 @@ struct pmu *perf_init_event(struct perf_event *event)
 	}

 	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		event->pmu = pmu;
 		ret = pmu->event_init(event);
 		if (!ret)
 			goto unlock;

@@ -5848,8 +5850,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		return ERR_PTR(err);
 	}

-	event->pmu = pmu;
-
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
 			jump_label_inc(&perf_sched_events);
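The core change moves the event->pmu assignment from perf_event_alloc() into perf_init_event(), immediately before each pmu->event_init() call, so an event_init implementation can rely on event->pmu already pointing at the PMU being tried. An illustrative event_init that depends on this ordering (struct my_pmu and my_pmu_validate_event are hypothetical, not from this commit):

	/* Illustrative: event->pmu is valid by the time event_init runs. */
	static int my_pmu_event_init(struct perf_event *event)
	{
		struct my_pmu *mypmu = container_of(event->pmu,
						    struct my_pmu, pmu);

		/* Decline events addressed to a different PMU type. */
		if (event->attr.type != event->pmu->type)
			return -ENOENT;

		return my_pmu_validate_event(mypmu, event);
	}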