Commit 6a930700 authored by Ingo Molnar

perf counters: clean up state transitions

Impact: cleanup

Introduce a proper enum for the 3 states of a counter:

	PERF_COUNTER_STATE_OFF		= -1
	PERF_COUNTER_STATE_INACTIVE	=  0
	PERF_COUNTER_STATE_ACTIVE	=  1

and rename counter->active to counter->state and propagate the
changes everywhere.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1d1c7ddb
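
As a reading aid for the diff below, here is a minimal, self-contained sketch of the pattern the patch adopts: the tri-valued int flag (-1/0/1) becomes a named enum, so tests such as "counter->active == -1" turn into self-describing comparisons. The enum values mirror the patch exactly; struct demo_counter and main() are invented scaffolding for illustration, not kernel code.

	#include <stdio.h>

	/* The enum exactly as the patch introduces it: */
	enum perf_counter_active_state {
		PERF_COUNTER_STATE_OFF		= -1,
		PERF_COUNTER_STATE_INACTIVE	=  0,
		PERF_COUNTER_STATE_ACTIVE	=  1,
	};

	/* Invented stand-in for struct perf_counter, demo only: */
	struct demo_counter {
		enum perf_counter_active_state state;
	};

	int main(void)
	{
		struct demo_counter c = { .state = PERF_COUNTER_STATE_OFF };

		/* Old style: if (c.active == -1) ...   New style: */
		if (c.state == PERF_COUNTER_STATE_OFF)
			printf("counter disabled; scheduling it in is a no-op\n");

		return 0;
	}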
@@ -332,7 +332,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
 	 * Then store sibling timestamps (if any):
 	 */
 	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
-		if (!counter->active) {
+		if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
 			/*
 			 * When counter was not in the overflow mask, we have to
 			 * read it from hardware. We read it as well, when it
...
@@ -127,6 +127,15 @@ struct hw_perf_counter_ops {
 	void (*hw_perf_counter_read)	(struct perf_counter *counter);
 };

+/**
+ * enum perf_counter_active_state - the states of a counter
+ */
+enum perf_counter_active_state {
+	PERF_COUNTER_STATE_OFF		= -1,
+	PERF_COUNTER_STATE_INACTIVE	=  0,
+	PERF_COUNTER_STATE_ACTIVE	=  1,
+};
+
 /**
  * struct perf_counter - performance counter kernel representation:
  */
@@ -136,7 +145,7 @@ struct perf_counter {
 	struct perf_counter		*group_leader;
 	const struct hw_perf_counter_ops *hw_ops;

-	int				active;
+	enum perf_counter_active_state	state;
 #if BITS_PER_LONG == 64
 	atomic64_t			count;
 #else
...
@@ -167,9 +167,9 @@ static void __perf_counter_remove_from_context(void *info)

 	spin_lock(&ctx->lock);

-	if (counter->active) {
+	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
 		counter->hw_ops->hw_perf_counter_disable(counter);
-		counter->active = 0;
+		counter->state = PERF_COUNTER_STATE_INACTIVE;
 		ctx->nr_active--;
 		cpuctx->active_oncpu--;
 		counter->task = NULL;
@@ -281,7 +281,7 @@ static void __perf_install_in_context(void *info)

 	if (cpuctx->active_oncpu < perf_max_counters) {
 		counter->hw_ops->hw_perf_counter_enable(counter);
-		counter->active = 1;
+		counter->state = PERF_COUNTER_STATE_ACTIVE;
 		counter->oncpu = cpu;
 		ctx->nr_active++;
 		cpuctx->active_oncpu++;
@@ -328,7 +328,6 @@ perf_install_in_context(struct perf_counter_context *ctx,

 	spin_lock_irq(&ctx->lock);
 	/*
-	 * If the context is active and the counter has not been added
 	 * we need to retry the smp call.
 	 */
 	if (ctx->nr_active && list_empty(&counter->list_entry)) {
@@ -353,12 +352,12 @@ counter_sched_out(struct perf_counter *counter,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_counter_context *ctx)
 {
-	if (!counter->active)
+	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
 		return;

 	counter->hw_ops->hw_perf_counter_disable(counter);
-	counter->active = 0;
+	counter->state = PERF_COUNTER_STATE_INACTIVE;
 	counter->oncpu = -1;

 	cpuctx->active_oncpu--;
 	ctx->nr_active--;
@@ -415,11 +414,11 @@ counter_sched_in(struct perf_counter *counter,
 		 struct perf_counter_context *ctx,
 		 int cpu)
 {
-	if (counter->active == -1)
+	if (counter->state == PERF_COUNTER_STATE_OFF)
 		return;

 	counter->hw_ops->hw_perf_counter_enable(counter);
-	counter->active = 1;
+	counter->state = PERF_COUNTER_STATE_ACTIVE;
 	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */

 	cpuctx->active_oncpu++;
@@ -506,8 +505,8 @@ int perf_counter_task_disable(void)
 	perf_flags = hw_perf_save_disable();

 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		WARN_ON_ONCE(counter->active == 1);
-		counter->active = -1;
+		WARN_ON_ONCE(counter->state == PERF_COUNTER_STATE_ACTIVE);
+		counter->state = PERF_COUNTER_STATE_OFF;
 	}

 	hw_perf_restore(perf_flags);
@@ -540,9 +539,9 @@ int perf_counter_task_enable(void)
 	perf_flags = hw_perf_save_disable();

 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		if (counter->active != -1)
+		if (counter->state != PERF_COUNTER_STATE_OFF)
 			continue;
-		counter->active = 0;
+		counter->state = PERF_COUNTER_STATE_INACTIVE;
 	}

 	hw_perf_restore(perf_flags);
@@ -620,7 +619,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
 	 * If counter is enabled and currently active on a CPU, update the
 	 * value in the counter structure:
 	 */
-	if (counter->active) {
+	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
 		smp_call_function_single(counter->oncpu,
 					 __hw_perf_counter_read, counter, 1);
 	}
@@ -673,7 +672,7 @@ static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)

 retry:
 	spin_lock_irq(&ctx->lock);
-	if (!counter->active) {
+	if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
 		counter->irqdata = counter->usrdata;
 		counter->usrdata = oldirqdata;
 		spin_unlock_irq(&ctx->lock);
...
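
Taken together, the scheduling hunks encode a simple transition discipline: only ACTIVE counters are scheduled out, and OFF counters are never scheduled in. Below is a hedged, self-contained sketch distilling just that logic from counter_sched_out() and counter_sched_in(); demo_counter, the demo_* function names, and the stubbed-out hardware hooks are invented for illustration, while the state checks mirror the patch.

	#include <stdio.h>

	/* The patch's enum, repeated so this sketch stands alone: */
	enum perf_counter_active_state {
		PERF_COUNTER_STATE_OFF		= -1,
		PERF_COUNTER_STATE_INACTIVE	=  0,
		PERF_COUNTER_STATE_ACTIVE	=  1,
	};

	struct demo_counter {
		enum perf_counter_active_state state;
	};

	static void demo_sched_out(struct demo_counter *c)
	{
		if (c->state != PERF_COUNTER_STATE_ACTIVE)
			return;		/* nothing to undo for OFF or INACTIVE */
		/* hw_perf_counter_disable() would run here */
		c->state = PERF_COUNTER_STATE_INACTIVE;
	}

	static void demo_sched_in(struct demo_counter *c)
	{
		if (c->state == PERF_COUNTER_STATE_OFF)
			return;		/* disabled counters are never re-armed */
		/* hw_perf_counter_enable() would run here */
		c->state = PERF_COUNTER_STATE_ACTIVE;
	}

	int main(void)
	{
		struct demo_counter c = { .state = PERF_COUNTER_STATE_OFF };

		demo_sched_in(&c);	/* no-op: counter is OFF */
		printf("after sched_in while OFF: %d\n", c.state);

		c.state = PERF_COUNTER_STATE_INACTIVE;
		demo_sched_in(&c);	/* INACTIVE -> ACTIVE */
		demo_sched_out(&c);	/* ACTIVE -> INACTIVE */
		printf("after sched_in/sched_out: %d\n", c.state);

		return 0;
	}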