Commit 4a0deca6 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: generic context switch event

Impact: cleanup

Use the generic software events for context switches.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.283522645@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 01ef09d9
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -138,7 +138,6 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
-extern u64 cpu_nr_switches(int cpu);
 extern u64 cpu_nr_migrations(int cpu);
 
 extern unsigned long get_parent_ip(unsigned long addr);
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -710,10 +710,13 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
 {
 	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
 	struct perf_counter_context *ctx = &task->perf_counter_ctx;
+	struct pt_regs *regs;
 
 	if (likely(!cpuctx->task_ctx))
 		return;
 
+	regs = task_pt_regs(task);
+	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
 	__perf_counter_sched_out(ctx, cpuctx);
 
 	cpuctx->task_ctx = NULL;
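With this hunk the context-switch count becomes push-driven: the sched-out hook fires once per switch and hands the event to the generic software-counter code via perf_swcounter_event(event, nr, nmi, regs), which credits every matching counter. A minimal user-space sketch of that push model (illustrative only, not the kernel's perf_swcounter_event implementation):

#include <stdatomic.h>
#include <stdio.h>

enum sw_event { SW_CONTEXT_SWITCHES, SW_CPU_MIGRATIONS };

/* Illustrative push-model counter: events are credited as they happen. */
struct sw_counter {
	enum sw_event event;
	atomic_ullong count;
};

/* Stand-in for the per-context counter lists the kernel would walk. */
static struct sw_counter *counters[8];
static int nr_counters;

/* Core of the push model: credit every counter matching the event. */
static void sw_event(enum sw_event event, unsigned long long nr)
{
	for (int i = 0; i < nr_counters; i++)
		if (counters[i]->event == event)
			atomic_fetch_add(&counters[i]->count, nr);
}

int main(void)
{
	struct sw_counter cs = { .event = SW_CONTEXT_SWITCHES };

	counters[nr_counters++] = &cs;
	sw_event(SW_CONTEXT_SWITCHES, 1);	/* as fired from sched-out */
	sw_event(SW_CPU_MIGRATIONS, 1);		/* no match, not counted */
	printf("context switches: %llu\n",
	       (unsigned long long)atomic_load(&cs.count));
	return 0;
}

The design consequence is that no per-event-type hw_perf_counter_ops is needed; one generic implementation serves every event source that calls into it.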
@@ -1667,58 +1670,6 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = {
 	.read		= task_clock_perf_counter_read,
 };
 
-/*
- * Software counter: context switches
- */
-
-static u64 get_context_switches(struct perf_counter *counter)
-{
-	struct task_struct *curr = counter->ctx->task;
-
-	if (curr)
-		return curr->nvcsw + curr->nivcsw;
-	return cpu_nr_switches(smp_processor_id());
-}
-
-static void context_switches_perf_counter_update(struct perf_counter *counter)
-{
-	u64 prev, now;
-	s64 delta;
-
-	prev = atomic64_read(&counter->hw.prev_count);
-	now = get_context_switches(counter);
-
-	atomic64_set(&counter->hw.prev_count, now);
-
-	delta = now - prev;
-
-	atomic64_add(delta, &counter->count);
-}
-
-static void context_switches_perf_counter_read(struct perf_counter *counter)
-{
-	context_switches_perf_counter_update(counter);
-}
-
-static int context_switches_perf_counter_enable(struct perf_counter *counter)
-{
-	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
-		atomic64_set(&counter->hw.prev_count,
-			     get_context_switches(counter));
-
-	return 0;
-}
-
-static void context_switches_perf_counter_disable(struct perf_counter *counter)
-{
-	context_switches_perf_counter_update(counter);
-}
-
-static const struct hw_perf_counter_ops perf_ops_context_switches = {
-	.enable		= context_switches_perf_counter_enable,
-	.disable	= context_switches_perf_counter_disable,
-	.read		= context_switches_perf_counter_read,
-};
-
 /*
  * Software counter: cpu migrations
  */
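For contrast, the block removed above is the pull model this patch retires: snapshot a monotonically increasing source (curr->nvcsw + curr->nivcsw for a task, or the per-cpu switch count) when the counter is enabled, then fold the delta into the counter on each read or disable. A self-contained sketch of that pattern (names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative pull-model counter: derives its value from a source. */
struct pull_counter {
	atomic_ullong prev_count;	/* last snapshot of the source */
	atomic_ullong count;		/* accumulated delta */
};

/* Stands in for a monotonic source such as curr->nvcsw + curr->nivcsw. */
static unsigned long long source;

static void pull_counter_enable(struct pull_counter *c)
{
	/* Snapshot so activity before enable is not charged to us. */
	atomic_store(&c->prev_count, source);
}

static void pull_counter_update(struct pull_counter *c)
{
	unsigned long long now = source;
	unsigned long long prev = atomic_exchange(&c->prev_count, now);

	atomic_fetch_add(&c->count, now - prev);
}

int main(void)
{
	struct pull_counter c = { 0 };

	source = 100;			/* pre-enable activity is ignored */
	pull_counter_enable(&c);
	source += 7;			/* seven "context switches" */
	pull_counter_update(&c);
	printf("counted: %llu\n",
	       (unsigned long long)atomic_load(&c.count));
	return 0;
}

The pull model needs a readable source for every counter scope, which is what forced the cpu_nr_switches() fallback for per-cpu counters; once events are pushed at the switch itself, both the fallback and this bespoke ops structure become dead code.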
@@ -1808,11 +1759,8 @@ sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_PAGE_FAULTS:
 	case PERF_COUNT_PAGE_FAULTS_MIN:
 	case PERF_COUNT_PAGE_FAULTS_MAJ:
-		hw_ops = &perf_ops_generic;
-		break;
 	case PERF_COUNT_CONTEXT_SWITCHES:
-		if (!counter->hw_event.exclude_kernel)
-			hw_ops = &perf_ops_context_switches;
+		hw_ops = &perf_ops_generic;
 		break;
 	case PERF_COUNT_CPU_MIGRATIONS:
 		if (!counter->hw_event.exclude_kernel)
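The dropped exclude_kernel test deserves a note: the bespoke counter could only honour exclusion at creation time (leaving hw_ops unset when exclude_kernel was requested), while the generic path sees regs with every event and can filter each one. A hypothetical sketch of such a per-event filter (the exclude_* names mirror hw_event above; the match logic is an assumption about the generic code, not copied from it):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the exclude_* bits carried in a counter's hw_event. */
struct hw_event_flags {
	bool exclude_user;
	bool exclude_kernel;
};

/* Per-event filter: decidable only when each event reports its mode. */
static bool event_counts(const struct hw_event_flags *ev, bool user_mode)
{
	return user_mode ? !ev->exclude_user : !ev->exclude_kernel;
}

int main(void)
{
	struct hw_event_flags ev = { .exclude_kernel = true };

	/* An event attributed to kernel mode is filtered out... */
	printf("kernel-mode event counted: %d\n", event_counts(&ev, false));
	/* ...while a user-mode event would still count. */
	printf("user-mode event counted: %d\n", event_counts(&ev, true));
	return 0;
}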
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2900,14 +2900,8 @@ unsigned long nr_active(void)
 
 /*
  * Externally visible per-cpu scheduler statistics:
- * cpu_nr_switches(cpu) - number of context switches on that cpu
  * cpu_nr_migrations(cpu) - number of migrations into that cpu
  */
-u64 cpu_nr_switches(int cpu)
-{
-	return cpu_rq(cpu)->nr_switches;
-}
-
 u64 cpu_nr_migrations(int cpu)
 {
 	return cpu_rq(cpu)->nr_migrations_in;