Commit 6ce77bfd authored by Alexander Shishkin, committed by Ingo Molnar

perf/core: Allow kernel filters on CPU events

While supporting file-based address filters for CPU events requires some
extra context-switch handling, kernel address filters are easy, since the
kernel mapping is preserved across address spaces. This is also useful in
its own right, as it permits tracing the kernel's scheduling paths.

This patch allows setting up kernel filters for CPU events.
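
For illustration, a minimal user-space sketch of attaching a kernel address
filter to a CPU-wide event via the PERF_EVENT_IOC_SET_FILTER ioctl. The
intel_pt sysfs path, the zeroed attr.config and the address/size in the
filter string are placeholder assumptions; the perf tool normally resolves
kernel symbols into such ranges.

/* Sketch: kernel address filter on a CPU-wide (pid == -1) event.
 * Assumes an address-filter-capable PMU such as intel_pt; the sysfs
 * path and the filter range below are placeholders.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int pmu_type(const char *path)
{
	int type = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
	}
	return type;
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	/* dynamic PMU type; intel_pt is used here as an example */
	attr.type = pmu_type("/sys/bus/event_source/devices/intel_pt/type");
	attr.disabled = 1;

	/* CPU-wide event: pid == -1, cpu == 0 */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/*
	 * No "@file" part, so the kernel treats this as a kernel address
	 * filter; with this patch it is accepted even though the event is
	 * not per-task.  Address and size are placeholders.
	 */
	if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER,
		  "filter 0xffffffff81000000/0x1000") < 0)
		perror("PERF_EVENT_IOC_SET_FILTER");

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	close(fd);
	return 0;
}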
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Will Deacon <will.deacon@arm.com>
Cc: vince@deater.net
Link: http://lkml.kernel.org/r/20170126094057.13805-4-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9ccbfbb1
@@ -482,6 +482,7 @@ struct perf_addr_filter {
  * @list:	list of filters for this event
  * @lock:	spinlock that serializes accesses to the @list and event's
  *		(and its children's) filter generations.
+ * @nr_file_filters:	number of file-based filters
  *
  * A child event will use parent's @list (and therefore @lock), so they are
  * bundled together; see perf_event_addr_filters().
@@ -489,6 +490,7 @@ struct perf_addr_filter {
 struct perf_addr_filters_head {
 	struct list_head	list;
 	raw_spinlock_t		lock;
+	unsigned int		nr_file_filters;
 };
 
 /**
...
@@ -8090,6 +8090,9 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 	if (task == TASK_TOMBSTONE)
 		return;
 
+	if (!ifh->nr_file_filters)
+		return;
+
 	mm = get_task_mm(event->ctx->task);
 	if (!mm)
 		goto restart;
@@ -8268,6 +8271,18 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 			if (!filename)
 				goto fail;
 
+			/*
+			 * For now, we only support file-based filters
+			 * in per-task events; doing so for CPU-wide
+			 * events requires additional context switching
+			 * trickery, since same object code will be
+			 * mapped at different virtual addresses in
+			 * different processes.
+			 */
+			ret = -EOPNOTSUPP;
+			if (!event->ctx->task)
+				goto fail_free_name;
+
 			/* look up the path and grab its inode */
 			ret = kern_path(filename, LOOKUP_FOLLOW, &path);
 			if (ret)
@@ -8283,6 +8298,8 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 			    !S_ISREG(filter->inode->i_mode))
 				/* free_filters_list() will iput() */
 				goto fail;
 
+			event->addr_filters.nr_file_filters++;
+
 		}
 		/* ready to consume more filters */
@@ -8322,24 +8339,13 @@ perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
 	if (WARN_ON_ONCE(event->parent))
 		return -EINVAL;
 
-	/*
-	 * For now, we only support filtering in per-task events; doing so
-	 * for CPU-wide events requires additional context switching trickery,
-	 * since same object code will be mapped at different virtual
-	 * addresses in different processes.
-	 */
-	if (!event->ctx->task)
-		return -EOPNOTSUPP;
-
 	ret = perf_event_parse_addr_filter(event, filter_str, &filters);
 	if (ret)
-		return ret;
+		goto fail_clear_files;
 
 	ret = event->pmu->addr_filters_validate(&filters);
-	if (ret) {
-		free_filters_list(&filters);
-		return ret;
-	}
+	if (ret)
+		goto fail_free_filters;
 
 	/* remove existing filters, if any */
 	perf_addr_filters_splice(event, &filters);
@@ -8347,6 +8353,14 @@ perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
 
 	/* install new filters */
 	perf_event_for_each_child(event, perf_event_addr_filters_apply);
 
+	return ret;
+
+fail_free_filters:
+	free_filters_list(&filters);
+
+fail_clear_files:
+	event->addr_filters.nr_file_filters = 0;
+
 	return ret;
 }
...