Commit 5c92d124 authored by Ingo Molnar

perf counters: implement PERF_COUNT_CPU_CLOCK

Impact: add new perf-counter type

The 'CPU clock' counter counts the amount of CPU clock time that is
elapsing, in nanoseconds, regardless of how much of that time the task
spends executing on a CPU.

This counter type is a Linux-kernel-based abstraction; it is available
even if the hardware does not support native hardware performance
counters.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 621a01ea
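
For reference, here is a minimal user-space sketch of how the new counter type could be exercised; it is not part of this commit. It assumes the early sys_perf_counter_open() calling convention of (hw_event, pid, cpu, group_fd) visible in the diff below, that a __NR_perf_counter_open syscall number is wired up for the architecture, that <linux/perf_counter.h> is visible to user space, and that read() on the counter fd returns the current u64 count. All of those details are assumptions for illustration only.

/*
 * Hypothetical usage sketch (not part of this commit): open a
 * PERF_COUNT_CPU_CLOCK counter for the current task on any CPU and
 * sample it twice.  Error handling is kept minimal for brevity.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#include <linux/perf_counter.h>	/* struct perf_counter_hw_event, PERF_COUNT_CPU_CLOCK */

int main(void)
{
        struct perf_counter_hw_event hw_event;
        uint64_t before, after;
        int fd;

        memset(&hw_event, 0, sizeof(hw_event));
        /* Software counters use negative type values; raw stays 0. */
        hw_event.type = PERF_COUNT_CPU_CLOCK;

        /* pid 0: current task, cpu -1: any CPU, group_fd -1: no group leader. */
        fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);
        if (fd < 0) {
                perror("perf_counter_open");
                return 1;
        }

        /* Return values of read() are left unchecked for brevity. */
        read(fd, &before, sizeof(before));
        usleep(10 * 1000);              /* let some CPU clock time elapse */
        read(fd, &after, sizeof(after));

        printf("cpu-clock delta: %llu ns\n",
               (unsigned long long)(after - before));
        close(fd);
        return 0;
}

Because PERF_COUNT_CPU_CLOCK is serviced by sw_perf_counter_init()/perf_ops_cpu_clock in the diff below, such a counter works even when hw_perf_counter_init() finds no native hardware performance counters.
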
@@ -178,35 +178,6 @@ static void x86_perf_counter_enable(struct perf_counter *counter)
         __x86_perf_counter_enable(hwc, idx);
 }
 
-#ifdef CONFIG_X86_64
-static inline void atomic64_counter_set(struct perf_counter *counter, u64 val)
-{
-        atomic64_set(&counter->count, val);
-}
-
-static inline u64 atomic64_counter_read(struct perf_counter *counter)
-{
-        return atomic64_read(&counter->count);
-}
-#else
-/*
- * Todo: add proper atomic64_t support to 32-bit x86:
- */
-static inline void atomic64_counter_set(struct perf_counter *counter, u64 val64)
-{
-        u32 *val32 = (void *)&val64;
-
-        atomic_set(counter->count32 + 0, *(val32 + 0));
-        atomic_set(counter->count32 + 1, *(val32 + 1));
-}
-
-static inline u64 atomic64_counter_read(struct perf_counter *counter)
-{
-        return atomic_read(counter->count32 + 0) |
-                (u64) atomic_read(counter->count32 + 1) << 32;
-}
-#endif
-
 static void __hw_perf_save_counter(struct perf_counter *counter,
                                    struct hw_perf_counter *hwc, int idx)
 {
@@ -309,7 +280,7 @@ static void x86_perf_counter_read(struct perf_counter *counter)
         } while (offs != hwc->prev_count);
 
         val32 = (s32) val;
         val = (s64)hwc->irq_period + (s64)val32;
 
         atomic64_counter_set(counter, hwc->prev_count + val);
 }
@@ -573,13 +544,14 @@ void __init init_hw_perf_counters(void)
         perf_counters_initialized = true;
 }
 
-static struct hw_perf_counter_ops x86_perf_counter_ops = {
+static const struct hw_perf_counter_ops x86_perf_counter_ops = {
         .hw_perf_counter_enable         = x86_perf_counter_enable,
         .hw_perf_counter_disable        = x86_perf_counter_disable,
         .hw_perf_counter_read           = x86_perf_counter_read,
 };
 
-struct hw_perf_counter_ops *hw_perf_counter_init(struct perf_counter *counter)
+const struct hw_perf_counter_ops *
+hw_perf_counter_init(struct perf_counter *counter)
 {
         int err;
......
@@ -131,7 +131,7 @@ struct perf_counter {
         struct list_head                list_entry;
         struct list_head                sibling_list;
         struct perf_counter             *group_leader;
-        struct hw_perf_counter_ops      *hw_ops;
+        const struct hw_perf_counter_ops *hw_ops;
 
         int                             active;
 #if BITS_PER_LONG == 64
@@ -197,7 +197,7 @@ struct perf_cpu_context {
 extern int perf_max_counters;
 
 #ifdef CONFIG_PERF_COUNTERS
-extern struct hw_perf_counter_ops *
+extern const struct hw_perf_counter_ops *
 hw_perf_counter_init(struct perf_counter *counter);
 
 extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
@@ -208,6 +208,9 @@ extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
 extern void hw_perf_restore_ctrl(u64 ctrl);
 extern u64 hw_perf_disable_all(void);
+extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
+extern u64 atomic64_counter_read(struct perf_counter *counter);
+
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu)           { }
@@ -219,7 +222,7 @@ static inline void perf_counter_init_task(struct task_struct *task)    { }
 static inline void perf_counter_notify(struct pt_regs *regs)            { }
 static inline void perf_counter_print_debug(void)                       { }
 static inline void hw_perf_restore_ctrl(u64 ctrl)                       { }
 static inline u64 hw_perf_disable_all(void)                             { return 0; }
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */
@@ -37,15 +37,15 @@ static DEFINE_MUTEX(perf_resource_mutex);
 /*
  * Architecture provided APIs - weak aliases:
  */
-extern __weak struct hw_perf_counter_ops *
+extern __weak const struct hw_perf_counter_ops *
 hw_perf_counter_init(struct perf_counter *counter)
 {
         return ERR_PTR(-EINVAL);
 }
 
-void __weak hw_perf_disable_all(void)           { }
-void __weak hw_perf_enable_all(void)            { }
+u64 __weak hw_perf_disable_all(void)            { return 0; }
+void __weak hw_perf_restore_ctrl(u64 ctrl)      { }
 void __weak hw_perf_counter_setup(void)         { }
 
 #if BITS_PER_LONG == 64
@@ -58,6 +58,16 @@ static inline u64 perf_counter_read_safe(struct perf_counter *counter)
         return (u64) atomic64_read(&counter->count);
 }
 
+void atomic64_counter_set(struct perf_counter *counter, u64 val)
+{
+        atomic64_set(&counter->count, val);
+}
+
+u64 atomic64_counter_read(struct perf_counter *counter)
+{
+        return atomic64_read(&counter->count);
+}
+
 #else
 
 /*
@@ -79,6 +89,20 @@ static u64 perf_counter_read_safe(struct perf_counter *counter)
         return cntl | ((u64) cnth) << 32;
 }
 
+void atomic64_counter_set(struct perf_counter *counter, u64 val64)
+{
+        u32 *val32 = (void *)&val64;
+
+        atomic_set(counter->count32 + 0, *(val32 + 0));
+        atomic_set(counter->count32 + 1, *(val32 + 1));
+}
+
+u64 atomic64_counter_read(struct perf_counter *counter)
+{
+        return atomic_read(counter->count32 + 0) |
+                (u64) atomic_read(counter->count32 + 1) << 32;
+}
+
 #endif
 
 static void
@@ -131,6 +155,7 @@ static void __perf_counter_remove_from_context(void *info)
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
         struct perf_counter *counter = info;
         struct perf_counter_context *ctx = counter->ctx;
+        u64 perf_flags;
 
         /*
          * If this is a task context, we need to check whether it is
@@ -155,9 +180,9 @@ static void __perf_counter_remove_from_context(void *info)
          * Protect the list operation against NMI by disabling the
          * counters on a global level. NOP for non NMI based counters.
          */
-        hw_perf_disable_all();
+        perf_flags = hw_perf_disable_all();
         list_del_counter(counter, ctx);
-        hw_perf_enable_all();
+        hw_perf_restore_ctrl(perf_flags);
 
         if (!ctx->task) {
                 /*
@@ -232,6 +257,7 @@ static void __perf_install_in_context(void *info)
         struct perf_counter *counter = info;
         struct perf_counter_context *ctx = counter->ctx;
         int cpu = smp_processor_id();
+        u64 perf_flags;
 
         /*
          * If this is a task context, we need to check whether it is
@@ -247,9 +273,9 @@ static void __perf_install_in_context(void *info)
          * Protect the list operation against NMI by disabling the
          * counters on a global level. NOP for non NMI based counters.
          */
-        hw_perf_disable_all();
+        perf_flags = hw_perf_disable_all();
         list_add_counter(counter, ctx);
-        hw_perf_enable_all();
+        hw_perf_restore_ctrl(perf_flags);
 
         ctx->nr_counters++;
@@ -457,6 +483,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
         struct perf_counter *counter;
+        u64 perf_flags;
 
         if (likely(!ctx->nr_counters))
                 return;
@@ -468,13 +495,13 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
         /*
          * Rotate the first entry last (works just fine for group counters too):
          */
-        hw_perf_disable_all();
+        perf_flags = hw_perf_disable_all();
         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                 list_del(&counter->list_entry);
                 list_add_tail(&counter->list_entry, &ctx->counter_list);
                 break;
         }
-        hw_perf_enable_all();
+        hw_perf_restore_ctrl(perf_flags);
 
         spin_unlock(&ctx->lock);
@@ -807,6 +834,42 @@ static const struct file_operations perf_fops = {
         .poll                   = perf_poll,
 };
 
+static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
+{
+}
+
+static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
+{
+}
+
+static void cpu_clock_perf_counter_read(struct perf_counter *counter)
+{
+        int cpu = raw_smp_processor_id();
+
+        atomic64_counter_set(counter, cpu_clock(cpu));
+}
+
+static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
+        .hw_perf_counter_enable         = cpu_clock_perf_counter_enable,
+        .hw_perf_counter_disable        = cpu_clock_perf_counter_disable,
+        .hw_perf_counter_read           = cpu_clock_perf_counter_read,
+};
+
+static const struct hw_perf_counter_ops *
+sw_perf_counter_init(struct perf_counter *counter)
+{
+        const struct hw_perf_counter_ops *hw_ops = NULL;
+
+        switch (counter->hw_event.type) {
+        case PERF_COUNT_CPU_CLOCK:
+                hw_ops = &perf_ops_cpu_clock;
+                break;
+        default:
+                break;
+        }
+        return hw_ops;
+}
+
 /*
  * Allocate and initialize a counter structure
  */
@@ -815,7 +878,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
                    int cpu,
                    struct perf_counter *group_leader)
 {
-        struct hw_perf_counter_ops *hw_ops;
+        const struct hw_perf_counter_ops *hw_ops;
         struct perf_counter *counter;
 
         counter = kzalloc(sizeof(*counter), GFP_KERNEL);
@@ -842,7 +905,13 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
         counter->group_leader          = group_leader;
         counter->hw_ops                = NULL;
 
-        hw_ops = hw_perf_counter_init(counter);
+        hw_ops = NULL;
+        if (!hw_event->raw && hw_event->type < 0)
+                hw_ops = sw_perf_counter_init(counter);
+        if (!hw_ops) {
+                hw_ops = hw_perf_counter_init(counter);
+        }
+
         if (!hw_ops) {
                 kfree(counter);
                 return NULL;
@@ -912,7 +981,7 @@ asmlinkage int sys_perf_counter_open(
                 goto err_put_context;
         }
 
-        ret = -ENOMEM;
+        ret = -EINVAL;
         counter = perf_counter_alloc(&hw_event, cpu, group_leader);
         if (!counter)
                 goto err_put_context;
......