Commit 0322cd6e authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: unify irq output code

Impact: cleanup

Having 3 slightly different copies of the same code around does nobody
any good. First step in revamping the output format.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.929962222@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b8e83514
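For orientation before the diff: the three near-identical copies (powerpc, x86 and the software-counter code) are replaced by a single perf_counter_output() in the generic perf counter code, which the architecture interrupt handlers and the software-counter paths now call. Below is a minimal, self-contained sketch of that pattern, not the kernel code itself; all names in it (counter_output, RECORD_*, struct counter) are illustrative.

/*
 * Minimal, self-contained sketch (not the kernel code) of the pattern this
 * patch introduces: every interrupt/overflow path hands record-type handling
 * to one shared output routine, analogous to perf_counter_output().
 */
#include <stdio.h>

enum record_type { RECORD_SIMPLE, RECORD_IRQ, RECORD_GROUP };

struct counter {
        enum record_type record_type;
        const char *name;
};

/* The single shared output routine the three former copies collapse into. */
static void counter_output(struct counter *c, int nmi, unsigned long ip)
{
        switch (c->record_type) {
        case RECORD_SIMPLE:
                return;                         /* nothing to record */
        case RECORD_IRQ:
                printf("%s: sample ip=%#lx\n", c->name, ip);
                break;
        case RECORD_GROUP:
                printf("%s: read whole group\n", c->name);
                break;
        }
        /* Wakeup handling also lives in one place now. */
        printf("%s: wake up reader (%s context)\n", c->name, nmi ? "NMI" : "irq");
}

int main(void)
{
        struct counter c = { RECORD_IRQ, "cycles" };

        /* The hardware NMI path and the software/hrtimer path call the same routine. */
        counter_output(&c, 1, 0xffff0000UL);
        counter_output(&c, 0, 0xffff0000UL);
        return 0;
}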
@@ -662,41 +662,6 @@ void perf_counter_do_pending(void)
         }
 }
 
-/*
- * Record data for an irq counter.
- * This function was lifted from the x86 code; maybe it should
- * go in the core?
- */
-static void perf_store_irq_data(struct perf_counter *counter, u64 data)
-{
-        struct perf_data *irqdata = counter->irqdata;
-
-        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
-                irqdata->overrun++;
-        } else {
-                u64 *p = (u64 *) &irqdata->data[irqdata->len];
-
-                *p = data;
-                irqdata->len += sizeof(u64);
-        }
-}
-
-/*
- * Record all the values of the counters in a group
- */
-static void perf_handle_group(struct perf_counter *counter)
-{
-        struct perf_counter *leader, *sub;
-
-        leader = counter->group_leader;
-        list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-                if (sub != counter)
-                        sub->hw_ops->read(sub);
-                perf_store_irq_data(counter, sub->hw_event.event_config);
-                perf_store_irq_data(counter, atomic64_read(&sub->count));
-        }
-}
-
 /*
  * A counter has overflowed; update its count and record
  * things if requested. Note that interrupts are hard-disabled
@@ -736,20 +701,8 @@ static void record_and_restart(struct perf_counter *counter, long val,
         /*
          * Finally record data if requested.
          */
-        if (record) {
-                switch (counter->hw_event.record_type) {
-                case PERF_RECORD_SIMPLE:
-                        break;
-                case PERF_RECORD_IRQ:
-                        perf_store_irq_data(counter, instruction_pointer(regs));
-                        counter->wakeup_pending = 1;
-                        break;
-                case PERF_RECORD_GROUP:
-                        perf_handle_group(counter);
-                        counter->wakeup_pending = 1;
-                        break;
-                }
-        }
+        if (record)
+                perf_counter_output(counter, 1, regs);
 }
 
 /*
...
@@ -674,20 +674,6 @@ static void pmc_generic_disable(struct perf_counter *counter)
         x86_perf_counter_update(counter, hwc, idx);
 }
 
-static void perf_store_irq_data(struct perf_counter *counter, u64 data)
-{
-        struct perf_data *irqdata = counter->irqdata;
-
-        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
-                irqdata->overrun++;
-        } else {
-                u64 *p = (u64 *) &irqdata->data[irqdata->len];
-
-                *p = data;
-                irqdata->len += sizeof(u64);
-        }
-}
-
 /*
  * Save and restart an expired counter. Called by NMI contexts,
  * so it has to be careful about preempting normal counter ops:
@@ -704,22 +690,6 @@ static void perf_save_and_restart(struct perf_counter *counter)
         __pmc_generic_enable(counter, hwc, idx);
 }
 
-static void
-perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
-{
-        struct perf_counter *counter, *group_leader = sibling->group_leader;
-
-        /*
-         * Store sibling timestamps (if any):
-         */
-        list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
-                x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
-                perf_store_irq_data(sibling, counter->hw_event.event_config);
-                perf_store_irq_data(sibling, atomic64_read(&counter->count));
-        }
-}
-
 /*
  * Maximum interrupt frequency of 100KHz per CPU
  */
@@ -754,28 +724,7 @@ static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
                         continue;
 
                 perf_save_and_restart(counter);
-
-                switch (counter->hw_event.record_type) {
-                case PERF_RECORD_SIMPLE:
-                        continue;
-                case PERF_RECORD_IRQ:
-                        perf_store_irq_data(counter, instruction_pointer(regs));
-                        break;
-                case PERF_RECORD_GROUP:
-                        perf_handle_group(counter, &status, &ack);
-                        break;
-                }
-
-                /*
-                 * From NMI context we cannot call into the scheduler to
-                 * do a task wakeup - but we mark these generic as
-                 * wakeup_pending and initate a wakeup callback:
-                 */
-                if (nmi) {
-                        counter->wakeup_pending = 1;
-                        set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
-                } else {
-                        wake_up(&counter->waitq);
-                }
+                perf_counter_output(counter, nmi, regs);
         }
 
         hw_perf_ack_status(ack);
...
@@ -317,6 +317,8 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
                                struct perf_cpu_context *cpuctx,
                                struct perf_counter_context *ctx, int cpu);
 
+extern void perf_counter_output(struct perf_counter *counter,
+                                int nmi, struct pt_regs *regs);
+
 /*
  * Return 1 for a software counter, 0 for a hardware counter
  */
...
@@ -1353,6 +1353,60 @@ static const struct file_operations perf_fops = {
         .compat_ioctl           = perf_ioctl,
 };
 
+/*
+ * Output
+ */
+
+static void perf_counter_store_irq(struct perf_counter *counter, u64 data)
+{
+        struct perf_data *irqdata = counter->irqdata;
+
+        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
+                irqdata->overrun++;
+        } else {
+                u64 *p = (u64 *) &irqdata->data[irqdata->len];
+
+                *p = data;
+                irqdata->len += sizeof(u64);
+        }
+}
+
+static void perf_counter_handle_group(struct perf_counter *counter)
+{
+        struct perf_counter *leader, *sub;
+
+        leader = counter->group_leader;
+        list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+                if (sub != counter)
+                        sub->hw_ops->read(sub);
+                perf_counter_store_irq(counter, sub->hw_event.event_config);
+                perf_counter_store_irq(counter, atomic64_read(&sub->count));
+        }
+}
+
+void perf_counter_output(struct perf_counter *counter,
+                         int nmi, struct pt_regs *regs)
+{
+        switch (counter->hw_event.record_type) {
+        case PERF_RECORD_SIMPLE:
+                return;
+
+        case PERF_RECORD_IRQ:
+                perf_counter_store_irq(counter, instruction_pointer(regs));
+                break;
+
+        case PERF_RECORD_GROUP:
+                perf_counter_handle_group(counter);
+                break;
+        }
+
+        if (nmi) {
+                counter->wakeup_pending = 1;
+                set_perf_counter_pending();
+        } else
+                wake_up(&counter->waitq);
+}
+
 /*
  * Generic software counter infrastructure
  */
@@ -1395,54 +1449,6 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
         atomic64_set(&hwc->count, -left);
 }
 
-static void perf_swcounter_store_irq(struct perf_counter *counter, u64 data)
-{
-        struct perf_data *irqdata = counter->irqdata;
-
-        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
-                irqdata->overrun++;
-        } else {
-                u64 *p = (u64 *) &irqdata->data[irqdata->len];
-
-                *p = data;
-                irqdata->len += sizeof(u64);
-        }
-}
-
-static void perf_swcounter_handle_group(struct perf_counter *sibling)
-{
-        struct perf_counter *counter, *group_leader = sibling->group_leader;
-
-        list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
-                counter->hw_ops->read(counter);
-                perf_swcounter_store_irq(sibling, counter->hw_event.event_config);
-                perf_swcounter_store_irq(sibling, atomic64_read(&counter->count));
-        }
-}
-
-static void perf_swcounter_interrupt(struct perf_counter *counter,
-                                     int nmi, struct pt_regs *regs)
-{
-        switch (counter->hw_event.record_type) {
-        case PERF_RECORD_SIMPLE:
-                break;
-
-        case PERF_RECORD_IRQ:
-                perf_swcounter_store_irq(counter, instruction_pointer(regs));
-                break;
-
-        case PERF_RECORD_GROUP:
-                perf_swcounter_handle_group(counter);
-                break;
-        }
-
-        if (nmi) {
-                counter->wakeup_pending = 1;
-                set_perf_counter_pending();
-        } else
-                wake_up(&counter->waitq);
-}
-
 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
         struct perf_counter *counter;
@@ -1461,7 +1467,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
         regs = task_pt_regs(current);
 
         if (regs)
-                perf_swcounter_interrupt(counter, 0, regs);
+                perf_counter_output(counter, 0, regs);
 
         hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
 
@@ -1473,7 +1479,7 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 {
         perf_swcounter_update(counter);
         perf_swcounter_set_period(counter);
-        perf_swcounter_interrupt(counter, nmi, regs);
+        perf_counter_output(counter, nmi, regs);
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
...