Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
linux
Commits
8fb93313
Commit
8fb93313
authored
Dec 23, 2008
by
Ingo Molnar
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
perfcounters: remove warnings
Impact: remove debug checks

Signed-off-by:
Ingo Molnar
<
mingo@elte.hu
>
parent
94c46572
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
0 additions
and
19 deletions
+0
-19
arch/x86/kernel/cpu/perf_counter.c
arch/x86/kernel/cpu/perf_counter.c
+0
-7
include/linux/perf_counter.h
include/linux/perf_counter.h
+0
-4
kernel/perf_counter.c
kernel/perf_counter.c
+0
-8
No files found.
arch/x86/kernel/cpu/perf_counter.c
View file @
8fb93313
...
...
@@ -64,7 +64,6 @@ x86_perf_counter_update(struct perf_counter *counter,
{
u64
prev_raw_count
,
new_raw_count
,
delta
;
WARN_ON_ONCE
(
counter
->
state
!=
PERF_COUNTER_STATE_ACTIVE
);
/*
* Careful: an NMI might modify the previous counter value.
*
...
...
@@ -89,7 +88,6 @@ x86_perf_counter_update(struct perf_counter *counter,
* of the count, so we do that by clipping the delta to 32 bits:
*/
delta
=
(
u64
)(
u32
)((
s32
)
new_raw_count
-
(
s32
)
prev_raw_count
);
WARN_ON_ONCE
((
int
)
delta
<
0
);
atomic64_add
(
delta
,
&
counter
->
count
);
atomic64_sub
(
delta
,
&
hwc
->
period_left
);
...
...
@@ -193,7 +191,6 @@ __x86_perf_counter_disable(struct perf_counter *counter,
int
err
;
err
=
wrmsr_safe
(
hwc
->
config_base
+
idx
,
hwc
->
config
,
0
);
WARN_ON_ONCE
(
err
);
}
static
DEFINE_PER_CPU
(
u64
,
prev_left
[
MAX_HW_COUNTERS
]);
...
...
@@ -209,8 +206,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
s32
left
=
atomic64_read
(
&
hwc
->
period_left
);
s32
period
=
hwc
->
irq_period
;
WARN_ON_ONCE
(
period
<=
0
);
/*
* If we are way outside a reasoable range then just skip forward:
*/
...
...
@@ -224,8 +219,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
atomic64_set
(
&
hwc
->
period_left
,
left
);
}
WARN_ON_ONCE
(
left
<=
0
);
per_cpu
(
prev_left
[
idx
],
smp_processor_id
())
=
left
;
/*
...
...
include/linux/perf_counter.h
View file @
8fb93313
...
...
@@ -218,8 +218,6 @@ struct perf_cpu_context {
extern
int
perf_max_counters
;
#ifdef CONFIG_PERF_COUNTERS
extern
void
perf_counter_show
(
struct
perf_counter
*
counter
,
char
*
str
,
int
trace
);
extern
const
struct
hw_perf_counter_ops
*
hw_perf_counter_init
(
struct
perf_counter
*
counter
);
...
...
@@ -237,8 +235,6 @@ extern int perf_counter_task_enable(void);
#else
static
inline
void
perf_counter_show
(
struct
perf_counter
*
counter
,
char
*
str
,
int
trace
)
{
}
static
inline
void
perf_counter_task_sched_in
(
struct
task_struct
*
task
,
int
cpu
)
{
}
static
inline
void
perf_counter_task_sched_out
(
struct
task_struct
*
task
,
int
cpu
)
{
}
...
...
kernel/perf_counter.c
View file @
8fb93313
...
...
@@ -861,8 +861,6 @@ static void task_clock_perf_counter_update(struct perf_counter *counter)
atomic64_set
(
&
counter
->
hw
.
prev_count
,
now
);
delta
=
now
-
prev
;
if
(
WARN_ON_ONCE
(
delta
<
0
))
delta
=
0
;
atomic64_add
(
delta
,
&
counter
->
count
);
}
...
...
@@ -906,8 +904,6 @@ static void page_faults_perf_counter_update(struct perf_counter *counter)
atomic64_set
(
&
counter
->
hw
.
prev_count
,
now
);
delta
=
now
-
prev
;
if
(
WARN_ON_ONCE
(
delta
<
0
))
delta
=
0
;
atomic64_add
(
delta
,
&
counter
->
count
);
}
...
...
@@ -954,8 +950,6 @@ static void context_switches_perf_counter_update(struct perf_counter *counter)
atomic64_set
(
&
counter
->
hw
.
prev_count
,
now
);
delta
=
now
-
prev
;
if
(
WARN_ON_ONCE
(
delta
<
0
))
delta
=
0
;
atomic64_add
(
delta
,
&
counter
->
count
);
}
...
...
@@ -1000,8 +994,6 @@ static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
atomic64_set
(
&
counter
->
hw
.
prev_count
,
now
);
delta
=
now
-
prev
;
if
(
WARN_ON_ONCE
(
delta
<
0
))
delta
=
0
;
atomic64_add
(
delta
,
&
counter
->
count
);
}
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment