Commit a0827713 authored by Chengming Zhou, committed by Peter Zijlstra

perf/core: Don't pass task around when ctx sched in

The current code passes the task around for ctx_sched_in(), only
to get the perf_cgroup of the task, then update the timestamp of
it and its ancestors and set them to active.

But we can use cpuctx->cgrp to get the active perf_cgroup and its
ancestors, since cpuctx->cgrp has already been set before
ctx_sched_in().

This patch removes the task argument from ctx_sched_in() and
cleans up the related code.
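
For reference, a condensed sketch of the resulting helper, assembled from
the hunks below (the loop body is elided here, as it is not touched by this
patch):

static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{
	struct perf_event_context *ctx = &cpuctx->ctx;
	struct perf_cgroup *cgrp = cpuctx->cgrp;
	struct perf_cgroup_info *info;
	struct cgroup_subsys_state *css;

	/*
	 * cpuctx->cgrp is set by perf_cgroup_switch() before ctx_sched_in();
	 * it is NULL when no cgroup event is active on this CPU, in which
	 * case there is nothing to timestamp.
	 */
	if (!cgrp)
		return;

	WARN_ON_ONCE(!ctx->nr_cgroups);

	/* update the timestamp of the cgroup and each of its ancestors */
	for (css = &cgrp->css; css; css = css->parent) {
		cgrp = container_of(css, struct perf_cgroup, css);
		...
	}
}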
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220329154523.86438-2-zhouchengming@bytedance.com
parent e590928d
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -574,8 +574,7 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 			      enum event_type_t event_type);
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-			     enum event_type_t event_type,
-			     struct task_struct *task);
+			     enum event_type_t event_type);
 
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
@@ -801,10 +800,10 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
 }
 
 static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-			  struct perf_event_context *ctx)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
 {
-	struct perf_cgroup *cgrp;
+	struct perf_event_context *ctx = &cpuctx->ctx;
+	struct perf_cgroup *cgrp = cpuctx->cgrp;
 	struct perf_cgroup_info *info;
 	struct cgroup_subsys_state *css;
 
@@ -813,10 +812,10 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 	 * ensure we do not access cgroup data
 	 * unless we have the cgroup pinned (css_get)
 	 */
-	if (!task || !ctx->nr_cgroups)
+	if (!cgrp)
 		return;
 
-	cgrp = perf_cgroup_from_task(task, ctx);
+	WARN_ON_ONCE(!ctx->nr_cgroups);
 
 	for (css = &cgrp->css; css; css = css->parent) {
 		cgrp = container_of(css, struct perf_cgroup, css);
@@ -869,14 +868,14 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 			WARN_ON_ONCE(cpuctx->cgrp);
 			/*
 			 * set cgrp before ctxsw in to allow
-			 * event_filter_match() to not have to pass
-			 * task around
+			 * perf_cgroup_set_timestamp() in ctx_sched_in()
+			 * to not have to pass task around
 			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
 			 * because cgorup events are only per-cpu
 			 */
 			cpuctx->cgrp = perf_cgroup_from_task(task,
 							     &cpuctx->ctx);
-			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+			cpu_ctx_sched_in(cpuctx, EVENT_ALL);
 		}
 		perf_pmu_enable(cpuctx->ctx.pmu);
 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -1118,8 +1117,7 @@ static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
 }
 
 static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-			  struct perf_event_context *ctx)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
 {
 }
 
@@ -2713,8 +2711,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
-	     enum event_type_t event_type,
-	     struct task_struct *task);
+	     enum event_type_t event_type);
 
 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 			       struct perf_event_context *ctx,
@@ -2730,15 +2727,14 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 }
 
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
-				struct perf_event_context *ctx,
-				struct task_struct *task)
+				struct perf_event_context *ctx)
 {
-	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+	cpu_ctx_sched_in(cpuctx, EVENT_PINNED);
 	if (ctx)
-		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+		ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 }
 
 /*
@@ -2788,7 +2784,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	else if (ctx_event_type & EVENT_PINNED)
 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-	perf_event_sched_in(cpuctx, task_ctx, current);
+	perf_event_sched_in(cpuctx, task_ctx);
 	perf_pmu_enable(cpuctx->ctx.pmu);
 }
 
@@ -3011,7 +3007,7 @@ static void __perf_event_enable(struct perf_event *event,
 		return;
 
 	if (!event_filter_match(event)) {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
 		return;
 	}
 
@@ -3020,7 +3016,7 @@ static void __perf_event_enable(struct perf_event *event,
 	 * then don't put it on unless the group is on.
 	 */
 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
 		return;
 	}
 
@@ -3865,8 +3861,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
-	     enum event_type_t event_type,
-	     struct task_struct *task)
+	     enum event_type_t event_type)
 {
 	int is_active = ctx->is_active;
 
@@ -3878,7 +3873,7 @@ ctx_sched_in(struct perf_event_context *ctx,
 	if (is_active ^ EVENT_TIME) {
 		/* start ctx time */
 		__update_context_time(ctx, false);
-		perf_cgroup_set_timestamp(task, ctx);
+		perf_cgroup_set_timestamp(cpuctx);
 		/*
 		 * CPU-release for the below ->is_active store,
 		 * see __load_acquire() in perf_event_time_now()
@@ -3909,12 +3904,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-			     enum event_type_t event_type,
-			     struct task_struct *task)
+			     enum event_type_t event_type)
 {
 	struct perf_event_context *ctx = &cpuctx->ctx;
 
-	ctx_sched_in(ctx, cpuctx, event_type, task);
+	ctx_sched_in(ctx, cpuctx, event_type);
 }
 
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
@@ -3956,7 +3950,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 */
 	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-	perf_event_sched_in(cpuctx, ctx, task);
+	perf_event_sched_in(cpuctx, ctx);
 
 	if (cpuctx->sched_cb_usage && pmu->sched_task)
 		pmu->sched_task(cpuctx->task_ctx, true);
@@ -4267,7 +4261,7 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
 	if (cpu_event)
 		rotate_ctx(&cpuctx->ctx, cpu_event);
 
-	perf_event_sched_in(cpuctx, task_ctx, current);
+	perf_event_sched_in(cpuctx, task_ctx);
 
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -4339,7 +4333,7 @@ static void perf_event_enable_on_exec(int ctxn)
 		clone_ctx = unclone_ctx(ctx);
 		ctx_resched(cpuctx, ctx, event_type);
 	} else {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
 	}
 	perf_ctx_unlock(cpuctx, ctx);