Commit 50f16a8b authored by Peter Zijlstra, committed by Ingo Molnar

perf: Remove type specific target pointers

The only reason CQM had to use a hard-coded pmu type was so it could use
cqm_target in hw_perf_event.

Do away with the {tp,bp,cqm}_target pointers and provide a non type
specific one.

This allows us to do away with that silly pmu type as well.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Vince Weaver <vince@deater.net>
Cc: acme@kernel.org
Cc: acme@redhat.com
Cc: hpa@zytor.com
Cc: jolsa@redhat.com
Cc: kanaka.d.juvva@intel.com
Cc: matt.fleming@intel.com
Cc: tglx@linutronix.de
Cc: torvalds@linux-foundation.org
Cc: vikas.shivappa@linux.intel.com
Link: http://lkml.kernel.org/r/20150305211019.GU21418@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4e16ed99
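
Before the diff, a toy user-space sketch of the pattern the patch applies (simplified, assumed names only: hw_event and task stand in for hw_perf_event and task_struct; this is not the kernel code shown below). The per-type target pointers move out of the type-specific union into one shared field, so common code can test the target without knowing which PMU the event belongs to:

/*
 * Toy sketch, not kernel code.  bp/tp/cqm stand in for the old
 * bp_target/tp_target/cqm_target carriers; 'target' is the new
 * generic pointer that lives outside the union.
 */
#include <stdio.h>

struct task { const char *comm; };              /* stand-in for task_struct */

struct hw_event {
        union {                                 /* type-specific state only */
                struct { int len;  } bp;        /* breakpoint */
                struct { int id;   } tp;        /* tracepoint */
                struct { int rmid; } cqm;       /* intel_cqm */
        };
        struct task *target;                    /* shared: NULL => per-CPU event */
};

/* One generic helper instead of per-type bp_target/tp_target/cqm_target checks. */
static int is_per_task(const struct hw_event *ev)
{
        return ev->target != NULL;
}

int main(void)
{
        struct task t = { "demo" };
        struct hw_event bp_ev = { .bp = { 4 }, .target = &t };   /* per-task event */
        struct hw_event tp_ev = { .tp = { 1 }, .target = NULL }; /* per-CPU event  */

        printf("bp_ev per-task: %d (%s)\n", is_per_task(&bp_ev), bp_ev.target->comm);
        printf("tp_ev per-task: %d\n", is_per_task(&tp_ev));
        return 0;
}
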
@@ -648,7 +648,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * Per-cpu breakpoints are not supported by our stepping
 	 * mechanism.
 	 */
-	if (!bp->hw.bp_target)
+	if (!bp->hw.target)
 		return -EINVAL;
 
 	/*
...
@@ -527,7 +527,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * Disallow per-task kernel breakpoints since these would
 	 * complicate the stepping code.
 	 */
-	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
+	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
 		return -EINVAL;
 
 	return 0;
...
@@ -263,7 +263,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 	/*
 	 * Events that target same task are placed into the same cache group.
 	 */
-	if (a->hw.cqm_target == b->hw.cqm_target)
+	if (a->hw.target == b->hw.target)
 		return true;
 
 	/*
@@ -279,7 +279,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
 	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.cqm_target);
+		return perf_cgroup_from_task(event->hw.target);
 
 	return event->cgrp;
 }
@@ -1365,8 +1365,7 @@ static int __init intel_cqm_init(void)
 
 	__perf_cpu_notifier(intel_cqm_cpu_notifier);
 
-	ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm",
-				PERF_TYPE_INTEL_CQM);
+	ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
 	if (ret)
 		pr_err("Intel CQM perf registration failed: %d\n", ret);
 	else
...
@@ -119,7 +119,6 @@ struct hw_perf_event {
 			struct hrtimer	hrtimer;
 		};
 		struct { /* tracepoint */
-			struct task_struct	*tp_target;
 			/* for tp_event->class */
 			struct list_head	tp_list;
 		};
@@ -129,7 +128,6 @@ struct hw_perf_event {
 			struct list_head	cqm_events_entry;
 			struct list_head	cqm_groups_entry;
 			struct list_head	cqm_group_entry;
-			struct task_struct	*cqm_target;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 		struct { /* breakpoint */
@@ -138,12 +136,12 @@ struct hw_perf_event {
 			 * problem hw_breakpoint has with context
 			 * creation and event initalization.
 			 */
-			struct task_struct		*bp_target;
 			struct arch_hw_breakpoint	info;
 			struct list_head		bp_list;
 		};
 #endif
 	};
+	struct task_struct		*target;
 	int				state;
 	local64_t			prev_count;
 	u64				sample_period;
...
@@ -32,7 +32,6 @@ enum perf_type_id {
 	PERF_TYPE_HW_CACHE		= 3,
 	PERF_TYPE_RAW			= 4,
 	PERF_TYPE_BREAKPOINT		= 5,
-	PERF_TYPE_INTEL_CQM		= 6,
 
 	PERF_TYPE_MAX,			/* non-ABI */
 };
...
@@ -7171,18 +7171,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	if (task) {
 		event->attach_state = PERF_ATTACH_TASK;
-
-		if (attr->type == PERF_TYPE_TRACEPOINT)
-			event->hw.tp_target = task;
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
 		/*
-		 * hw_breakpoint is a bit difficult here..
+		 * XXX pmu::event_init needs to know what task to account to
+		 * and we cannot use the ctx information because we need the
+		 * pmu before we get a ctx.
 		 */
-		else if (attr->type == PERF_TYPE_BREAKPOINT)
-			event->hw.bp_target = task;
-#endif
-		else if (attr->type == PERF_TYPE_INTEL_CQM)
-			event->hw.cqm_target = task;
+		event->hw.target = task;
 	}
 
 	if (!overflow_handler && parent_event) {
...
@@ -116,12 +116,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  */
 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 {
-	struct task_struct *tsk = bp->hw.bp_target;
+	struct task_struct *tsk = bp->hw.target;
 	struct perf_event *iter;
 	int count = 0;
 
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-		if (iter->hw.bp_target == tsk &&
+		if (iter->hw.target == tsk &&
 		    find_slot_idx(iter) == type &&
 		    (iter->cpu < 0 || cpu == iter->cpu))
 			count += hw_breakpoint_weight(iter);
@@ -153,7 +153,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		int nr;
 
 		nr = info->cpu_pinned;
-		if (!bp->hw.bp_target)
+		if (!bp->hw.target)
 			nr += max_task_bp_pinned(cpu, type);
 		else
 			nr += task_bp_pinned(cpu, bp, type);
@@ -210,7 +210,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 		weight = -weight;
 
 	/* Pinned counter cpu profiling */
-	if (!bp->hw.bp_target) {
+	if (!bp->hw.target) {
 		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
 		return;
 	}
...
@@ -1005,7 +1005,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 		return true;
 
 	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
-		if (event->hw.tp_target->mm == mm)
+		if (event->hw.target->mm == mm)
 			return true;
 	}
 
@@ -1015,7 +1015,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 static inline bool
 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
 {
-	return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
+	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
 }
 
 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
@@ -1023,10 +1023,10 @@ static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
 	bool done;
 
 	write_lock(&tu->filter.rwlock);
-	if (event->hw.tp_target) {
+	if (event->hw.target) {
 		list_del(&event->hw.tp_list);
 		done = tu->filter.nr_systemwide ||
-			(event->hw.tp_target->flags & PF_EXITING) ||
+			(event->hw.target->flags & PF_EXITING) ||
 			uprobe_filter_event(tu, event);
 	} else {
 		tu->filter.nr_systemwide--;
@@ -1046,7 +1046,7 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
 	int err;
 
 	write_lock(&tu->filter.rwlock);
-	if (event->hw.tp_target) {
+	if (event->hw.target) {
 		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
...
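
One consequence of the intel_cqm_init() hunk above: with PERF_TYPE_INTEL_CQM removed, perf_pmu_register() is called with -1, which makes the perf core assign the intel_cqm PMU a dynamically allocated type id. That id is exported via sysfs, so a tool would read it before filling in perf_event_attr.type; a minimal sketch (hypothetical helper, not part of this patch):

#include <stdio.h>

/* Read the dynamic type id of a PMU from sysfs (returns -1 on error). */
static int read_pmu_type(const char *name)
{
        char path[256];
        FILE *f;
        int type = -1;

        snprintf(path, sizeof(path),
                 "/sys/bus/event_source/devices/%s/type", name);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%d", &type) != 1)
                type = -1;
        fclose(f);
        return type;    /* value to put in perf_event_attr.type */
}

int main(void)
{
        printf("intel_cqm dynamic type: %d\n", read_pmu_type("intel_cqm"));
        return 0;
}
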