Commit f6c7d5fe authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: there's more to overflow than writing events

Prepare for more generic overflow handling. The new perf_counter_overflow()
method will handle the generic bits of counter overflow, and can return a
non-zero value, in which case the counter should be (soft) disabled, so
that it won't count until it is properly disabled.

XXX: do powerpc and swcounter
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094517.812109629@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b6276f35
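
To make the new contract concrete: an architecture overflow handler is expected to call perf_counter_overflow() and, when it returns non-zero, to stop the counter itself. The stand-alone C sketch below only models that call/return protocol; struct counter, output_event(), handle_overflow() and pmu_interrupt() are made-up stand-ins (not kernel APIs), and the limit check is an invented policy standing in for whatever a later, more generic perf_counter_overflow() might decide.

#include <stdio.h>

/* Toy counter state; in the kernel this lives in struct perf_counter. */
struct counter {
        int enabled;
        long long overflows;
};

/* Stand-in for perf_counter_output(): just report the overflow event. */
static void output_event(const struct counter *c)
{
        printf("overflow event #%lld\n", c->overflows);
}

/*
 * Stand-in for perf_counter_overflow(): do the generic work and return
 * non-zero when the caller should (soft) disable the counter.  The real
 * function in this commit still always returns 0; the limit here exists
 * purely so the caller-side handling can be exercised.
 */
static int handle_overflow(struct counter *c, long long limit)
{
        output_event(c);
        return c->overflows >= limit;
}

/* Stand-in for an architecture overflow handler (compare the x86 hunk below). */
static void pmu_interrupt(struct counter *c)
{
        c->overflows++;
        if (handle_overflow(c, 3))
                c->enabled = 0; /* soft-disable: stop counting */
}

int main(void)
{
        struct counter c = { .enabled = 1, .overflows = 0 };

        while (c.enabled)
                pmu_interrupt(&c);

        printf("counter soft-disabled after %lld overflows\n", c.overflows);
        return 0;
}

In the commit itself perf_counter_overflow() always returns 0, and each caller soft-disables in its own way: the x86 handler calls __pmc_generic_disable(), the hrtimer-based software counter returns HRTIMER_NORESTART, and perf_swcounter_overflow() only carries a placeholder comment, matching the XXX note above.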
@@ -732,7 +732,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
          * Finally record data if requested.
          */
         if (record)
-                perf_counter_output(counter, 1, regs);
+                perf_counter_overflow(counter, 1, regs);
 }
 
 /*
...
@@ -800,7 +800,8 @@ static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
                        continue;
 
                perf_save_and_restart(counter);
-                perf_counter_output(counter, nmi, regs);
+                if (perf_counter_overflow(counter, nmi, regs))
+                        __pmc_generic_disable(counter, &counter->hw, bit);
        }
 
        hw_perf_ack_status(ack);
...
@@ -491,7 +491,7 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_counter_context *ctx, int cpu);
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
-extern void perf_counter_output(struct perf_counter *counter,
-                                int nmi, struct pt_regs *regs);
+extern int perf_counter_overflow(struct perf_counter *counter,
+                                 int nmi, struct pt_regs *regs);
 /*
  * Return 1 for a software counter, 0 for a hardware counter
...
@@ -1800,7 +1800,7 @@ static void perf_output_end(struct perf_output_handle *handle)
        rcu_read_unlock();
 }
 
-void perf_counter_output(struct perf_counter *counter,
-                         int nmi, struct pt_regs *regs)
+static void perf_counter_output(struct perf_counter *counter,
+                                int nmi, struct pt_regs *regs)
 {
        int ret;
@@ -2033,6 +2033,17 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
        perf_counter_mmap_event(&mmap_event);
 }
 
+/*
+ * Generic counter overflow handling.
+ */
+
+int perf_counter_overflow(struct perf_counter *counter,
+                          int nmi, struct pt_regs *regs)
+{
+        perf_counter_output(counter, nmi, regs);
+        return 0;
+}
+
 /*
  * Generic software counter infrastructure
  */
@@ -2077,6 +2088,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 
 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
+        enum hrtimer_restart ret = HRTIMER_RESTART;
        struct perf_counter *counter;
        struct pt_regs *regs;
 
@@ -2092,12 +2104,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
                        !counter->hw_event.exclude_user)
                regs = task_pt_regs(current);
 
-        if (regs)
-                perf_counter_output(counter, 0, regs);
+        if (regs) {
+                if (perf_counter_overflow(counter, 0, regs))
+                        ret = HRTIMER_NORESTART;
+        }
 
        hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
 
-        return HRTIMER_RESTART;
+        return ret;
 }
 
 static void perf_swcounter_overflow(struct perf_counter *counter,
@@ -2105,7 +2119,10 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 {
        perf_swcounter_update(counter);
        perf_swcounter_set_period(counter);
-        perf_counter_output(counter, nmi, regs);
+        if (perf_counter_overflow(counter, nmi, regs))
+                /* soft-disable the counter */
+                ;
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
...