Commit 614e4c4e authored by Stephane Eranian, committed by Ingo Molnar

perf/core: Robustify the perf_cgroup_from_task() RCU checks

This patch reinforces the lockdep checks performed by
perf_cgroup_from_task() by passing the perf_event_context
whenever possible. It is okay to not hold the RCU read lock
when we know we hold ctx->lock; this patch makes sure that
property holds.

In some functions, such as perf_cgroup_sched_in(), we do not
pass the context because we are sure we are holding the RCU
read lock.
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: edumazet@google.com
Link: http://lkml.kernel.org/r/1447322404-10920-3-git-send-email-eranian@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ddaaf4e2
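
For context, here is a minimal sketch (not part of this patch) of the two calling conventions the new prototype allows: pass the context when the caller holds ctx->lock, or pass NULL when the caller is inside an RCU read-side critical section, as the message above describes. The wrapper functions below are hypothetical and only illustrate the contract; the real call sites are the ones changed in the diff below.

/* Illustrative sketch only -- kernel-style C, assumes CONFIG_CGROUP_PERF. */
#include <linux/perf_event.h>
#include <linux/rcupdate.h>
#include <linux/lockdep.h>

/* Caller already holds ctx->lock: pass the context so the extra
 * task_css_check() condition is satisfied via lockdep_is_held(&ctx->lock),
 * even though no RCU read lock is taken here. */
static void example_under_ctx_lock(struct perf_event_context *ctx,
				   struct task_struct *task)
{
	struct perf_cgroup *cgrp;

	lockdep_assert_held(&ctx->lock);
	cgrp = perf_cgroup_from_task(task, ctx);	/* no rcu_read_lock() needed */
	(void)cgrp;
}

/* Caller holds the RCU read lock: pass NULL, which turns the extra
 * lockdep condition into 'true'; per the commit message this is only
 * done where the RCU read lock is known to be held. */
static void example_under_rcu(struct task_struct *task)
{
	struct perf_cgroup *cgrp;

	rcu_read_lock();
	cgrp = perf_cgroup_from_task(task, NULL);
	/* use cgrp only inside the RCU read-side critical section */
	(void)cgrp;
	rcu_read_unlock();
}
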
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
 	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.target);
+		return perf_cgroup_from_task(event->hw.target, event->ctx);
 	return event->cgrp;
 }
...
@@ -697,9 +697,11 @@ struct perf_cgroup {
  * if there is no cgroup event for the current CPU context.
  */
 static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
+perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
 {
-	return container_of(task_css(task, perf_event_cgrp_id),
+	return container_of(task_css_check(task, perf_event_cgrp_id,
+					   ctx ? lockdep_is_held(&ctx->lock)
+					       : true),
 			    struct perf_cgroup, css);
 }
 #endif /* CONFIG_CGROUP_PERF */
...
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
 	if (!is_cgroup_event(event))
 		return;
 
-	cgrp = perf_cgroup_from_task(current);
+	cgrp = perf_cgroup_from_task(current, event->ctx);
 	/*
 	 * Do not update time when cgroup is not active
 	 */
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 	if (!task || !ctx->nr_cgroups)
 		return;
 
-	cgrp = perf_cgroup_from_task(task);
+	cgrp = perf_cgroup_from_task(task, ctx);
 	info = this_cpu_ptr(cgrp->info);
 	info->timestamp = ctx->timestamp;
 }
@@ -521,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 			 * set cgrp before ctxsw in to allow
 			 * event_filter_match() to not have to pass
 			 * task around
+			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
+			 * because cgroup events are only per-cpu
 			 */
-			cpuctx->cgrp = perf_cgroup_from_task(task);
+			cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
 			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
 		}
 		perf_pmu_enable(cpuctx->ctx.pmu);
@@ -542,15 +544,17 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
+	 * we do not need to pass the ctx here because we know
+	 * we are holding the rcu lock
 	 */
-	cgrp1 = perf_cgroup_from_task(task);
+	cgrp1 = perf_cgroup_from_task(task, NULL);
 
 	/*
 	 * next is NULL when called from perf_event_enable_on_exec()
 	 * that will systematically cause a cgroup_switch()
 	 */
 	if (next)
-		cgrp2 = perf_cgroup_from_task(next);
+		cgrp2 = perf_cgroup_from_task(next, NULL);
 
 	/*
 	 * only schedule out current cgroup events if we know
@@ -572,11 +576,13 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
+	 * we do not need to pass the ctx here because we know
+	 * we are holding the rcu lock
 	 */
-	cgrp1 = perf_cgroup_from_task(task);
+	cgrp1 = perf_cgroup_from_task(task, NULL);
 
 	/* prev can never be NULL */
-	cgrp2 = perf_cgroup_from_task(prev);
+	cgrp2 = perf_cgroup_from_task(prev, NULL);
 
 	/*
 	 * only need to schedule in cgroup events if we are changing
...