Commit 862a1a5f authored Dec 17, 2008 by Ingo Molnar
x86, perfcounters: refactor code for fixed-function PMCs
Impact: clean up

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 703e937c
Showing 2 changed files with 52 additions and 35 deletions.
arch/x86/include/asm/perf_counter.h   +13 -1
arch/x86/kernel/cpu/perf_counter.c    +39 -34
arch/x86/include/asm/perf_counter.h
@@ -8,6 +8,10 @@
 #define X86_PMC_MAX_GENERIC					8
 #define X86_PMC_MAX_FIXED					3
 
+#define X86_PMC_IDX_GENERIC					0
+#define X86_PMC_IDX_FIXED				       32
+#define X86_PMC_IDX_MAX					       64
+
 #define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
 #define MSR_ARCH_PERFMON_PERFCTR1			      0xc2
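For orientation (my note, not part of the patch): the three new constants carve out a single 64-bit index space in which generic PMCs occupy slots 0..X86_PMC_MAX_GENERIC-1 and fixed-function PMCs start at slot 32, mirroring the enable-bit layout of MSR_CORE_PERF_GLOBAL_CTRL. A minimal sketch; fixed_pmc_idx() is a hypothetical helper, not code from this commit:

	/* Hypothetical helper: slot of fixed-function counter n in the
	 * unified index space (fixed ctr 0 -> 32, 1 -> 33, 2 -> 34). */
	static inline int fixed_pmc_idx(int n)
	{
		return X86_PMC_IDX_FIXED + n;
	}

This layout is what lets perf_counter_mask, assembled at init time below, be written verbatim to the global-control MSR.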
@@ -54,6 +58,15 @@ union cpuid10_edx {
  * Fixed-purpose performance counters:
  */
 
+/*
+ * All 3 fixed-mode PMCs are configured via this single MSR:
+ */
+#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL			0x38d
+
+/*
+ * The counts are available in three separate MSRs:
+ */
+
 /* Instr_Retired.Any: */
 #define MSR_ARCH_PERFMON_FIXED_CTR0			0x309
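Since the fixed-counter MSRs are consecutive (0x309..0x30b), all three counts can be read in a loop. A hedged sketch using the rdmsrl() helper this patch already relies on; per the Intel SDM the counters only tick once their 4-bit control fields in MSR_ARCH_PERFMON_FIXED_CTR_CTRL are programmed (bit 0: count in ring 0, bit 1: count in ring 3, bit 3: PMI on overflow):

	/* Illustrative sketch: read the three architectural
	 * fixed-function counters into counts[0..2]. */
	static void read_fixed_counters(u64 counts[X86_PMC_MAX_FIXED])
	{
		int i;

		for (i = 0; i < X86_PMC_MAX_FIXED; i++)
			rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + i, counts[i]);
	}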
@@ -63,7 +76,6 @@ union cpuid10_edx {
 /* CPU_CLK_Unhalted.Ref: */
 #define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
 
-
 #ifdef CONFIG_PERF_COUNTERS
 extern void init_hw_perf_counters(void);
 extern void perf_counters_lapic_init(int nmi);
arch/x86/kernel/cpu/perf_counter.c
@@ -24,17 +24,14 @@ static bool perf_counters_initialized __read_mostly;
 /*
  * Number of (generic) HW counters:
  */
-static int nr_hw_counters __read_mostly;
-static u32 perf_counter_mask __read_mostly;
+static int nr_counters_generic __read_mostly;
+static u64 perf_counter_mask __read_mostly;
 
-static int nr_hw_counters_fixed __read_mostly;
+static int nr_counters_fixed __read_mostly;
 
 struct cpu_hw_counters {
-	struct perf_counter	*generic[X86_PMC_MAX_GENERIC];
-	unsigned long		used[BITS_TO_LONGS(X86_PMC_MAX_GENERIC)];
-
-	struct perf_counter	*fixed[X86_PMC_MAX_FIXED];
-	unsigned long		used_fixed[BITS_TO_LONGS(X86_PMC_MAX_FIXED)];
+	struct perf_counter	*counters[X86_PMC_IDX_MAX];
+	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 };
 
 /*
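Sizing counters[] and used[] for the full X86_PMC_IDX_MAX slots means one array and one bitmap now cover both counter classes. A minimal sketch (mine, not from the patch) of the allocation pattern this enables, built from the same find_first_zero_bit()/set_bit() calls used in pmc_generic_enable() below:

	/* Sketch: claim any free slot, generic or fixed, from the one
	 * shared bitmap.  Pass nr_counters_generic to stay on generic
	 * PMCs (as this patch does), or X86_PMC_IDX_MAX once fixed
	 * slots are allowed to participate. */
	static int claim_counter_slot(struct cpu_hw_counters *cpuc, int max_idx)
	{
		int idx = find_first_zero_bit(cpuc->used, max_idx);

		if (idx >= max_idx)
			return -1;		/* every slot busy */
		set_bit(idx, cpuc->used);
		return idx;
	}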
@@ -159,7 +156,7 @@ void hw_perf_enable_all(void)
 	if (unlikely(!perf_counters_initialized))
 		return;
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask);
 }
 
 u64 hw_perf_save_disable(void)
@@ -170,7 +167,7 @@ u64 hw_perf_save_disable(void)
 		return 0;
 
 	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
 	return ctrl;
 }
@@ -181,7 +178,7 @@ void hw_perf_restore(u64 ctrl)
 	if (unlikely(!perf_counters_initialized))
 		return;
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 }
 EXPORT_SYMBOL_GPL(hw_perf_restore);
@@ -239,6 +236,11 @@ __pmc_generic_enable(struct perf_counter *counter,
 	       hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
 }
 
+static int fixed_mode_idx(struct hw_perf_counter *hwc)
+{
+	return -1;
+}
+
 /*
  * Find a PMC slot for the freshly enabled / scheduled in counter:
  */
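fixed_mode_idx() is a deliberate stub: returning -1 means nothing is ever scheduled onto a fixed-function PMC yet, keeping this patch a pure refactor. Purely as a speculative sketch of where the hook could go (the event encodings are the architectural ones; the config masking is my assumption, not this patch's code):

	/* Hypothetical follow-up, for illustration only: recognize the
	 * two events that fixed counters 0 and 1 count architecturally. */
	static int fixed_mode_idx_sketch(struct hw_perf_counter *hwc)
	{
		unsigned int event = hwc->config & 0xffff;	/* assumed encoding */

		if (event == 0x00c0)		/* INST_RETIRED.ANY */
			return X86_PMC_IDX_FIXED + 0;
		if (event == 0x003c)		/* CPU_CLK_UNHALTED.CORE */
			return X86_PMC_IDX_FIXED + 1;
		return -1;			/* fall back to a generic PMC */
	}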
@@ -250,7 +252,7 @@ static void pmc_generic_enable(struct perf_counter *counter)
 	/* Try to get the previous counter again */
 	if (test_and_set_bit(idx, cpuc->used)) {
-		idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
+		idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
 		set_bit(idx, cpuc->used);
 		hwc->idx = idx;
 	}
@@ -259,7 +261,7 @@ static void pmc_generic_enable(struct perf_counter *counter)
 	__pmc_generic_disable(counter, hwc, idx);
 
-	cpuc->generic[idx] = counter;
+	cpuc->counters[idx] = counter;
 
 	__hw_perf_counter_set_period(counter, hwc, idx);
 	__pmc_generic_enable(counter, hwc, idx);
@@ -270,7 +272,7 @@ void perf_counter_print_debug(void)
 	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left;
 	int cpu, idx;
 
-	if (!nr_hw_counters)
+	if (!nr_counters_generic)
 		return;
 
 	local_irq_disable();
@@ -286,7 +288,7 @@ void perf_counter_print_debug(void)
 	printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
 	printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);
 
-	for (idx = 0; idx < nr_hw_counters; idx++) {
+	for (idx = 0; idx < nr_counters_generic; idx++) {
 		rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
 		rdmsrl(MSR_ARCH_PERFMON_PERFCTR0  + idx, pmc_count);
 
@@ -311,7 +313,7 @@ static void pmc_generic_disable(struct perf_counter *counter)
 	__pmc_generic_disable(counter, hwc, idx);
 
 	clear_bit(idx, cpuc->used);
-	cpuc->generic[idx] = NULL;
+	cpuc->counters[idx] = NULL;
 
 	/*
 	 * Drain the remaining delta count out of a counter
@@ -381,7 +383,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
 
 	/* Disable counters globally */
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 	ack_APIC_irq();
 
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
@@ -392,8 +394,8 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 again:
 	ack = status;
-	for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
-		struct perf_counter *counter = cpuc->generic[bit];
+	for_each_bit(bit, (unsigned long *) &status, nr_counters_generic) {
+		struct perf_counter *counter = cpuc->counters[bit];
 
 		clear_bit(bit, (unsigned long *) &status);
 		if (!counter)
@@ -424,7 +426,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 		}
 	}
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 
 	/*
 	 * Repeat if there is more work to be done:
@@ -436,7 +438,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 	/*
 	 * Restore - do not reenable when global enable is off:
 	 */
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
 }
 
 void smp_perf_counter_interrupt(struct pt_regs *regs)
@@ -462,8 +464,8 @@ void perf_counter_notify(struct pt_regs *regs)
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
-	for_each_bit(bit, cpuc->used, nr_hw_counters) {
-		struct perf_counter *counter = cpuc->generic[bit];
+	for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
+		struct perf_counter *counter = cpuc->counters[bit];
 
 		if (!counter)
 			continue;
@@ -540,26 +542,29 @@ void __init init_hw_perf_counters(void)
 	printk(KERN_INFO "... version:         %d\n", eax.split.version_id);
 	printk(KERN_INFO "... num counters:    %d\n", eax.split.num_counters);
-	nr_hw_counters = eax.split.num_counters;
-	if (nr_hw_counters > X86_PMC_MAX_GENERIC) {
-		nr_hw_counters = X86_PMC_MAX_GENERIC;
+	nr_counters_generic = eax.split.num_counters;
+	if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
+		nr_counters_generic = X86_PMC_MAX_GENERIC;
 		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
-			nr_hw_counters, X86_PMC_MAX_GENERIC);
+			nr_counters_generic, X86_PMC_MAX_GENERIC);
 	}
-	perf_counter_mask = (1 << nr_hw_counters) - 1;
-	perf_max_counters = nr_hw_counters;
+	perf_counter_mask = (1 << nr_counters_generic) - 1;
+	perf_max_counters = nr_counters_generic;
 
 	printk(KERN_INFO "... bit width:       %d\n", eax.split.bit_width);
 	printk(KERN_INFO "... mask length:     %d\n", eax.split.mask_length);
 
-	nr_hw_counters_fixed = edx.split.num_counters_fixed;
-	if (nr_hw_counters_fixed > X86_PMC_MAX_FIXED) {
-		nr_hw_counters_fixed = X86_PMC_MAX_FIXED;
+	nr_counters_fixed = edx.split.num_counters_fixed;
+	if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
+		nr_counters_fixed = X86_PMC_MAX_FIXED;
 		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
-			nr_hw_counters_fixed, X86_PMC_MAX_FIXED);
+			nr_counters_fixed, X86_PMC_MAX_FIXED);
 	}
-	printk(KERN_INFO "... fixed counters:  %d\n", nr_hw_counters_fixed);
+	printk(KERN_INFO "... fixed counters:  %d\n", nr_counters_fixed);
+
+	perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
+	printk(KERN_INFO "... counter mask:    %016Lx\n", perf_counter_mask);
 	perf_counters_initialized = true;
 
 	perf_counters_lapic_init(0);
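A worked example of the mask arithmetic (my numbers, not from the commit): on a CPU reporting 2 generic and 3 fixed counters, as Core 2 does,

	perf_counter_mask  = (1 << 2) - 1;		/* 0x3 */
	perf_counter_mask |= ((1LL << 3) - 1) << 32;	/* now 0x700000003 */

so the boot log would read "... counter mask: 0000000700000003", and a single wrmsrl() of that value to MSR_CORE_PERF_GLOBAL_CTRL enables all five counters at once.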