Commit 4d9493c9 authored by Ingo Molnar's avatar Ingo Molnar Committed by Thomas Gleixner

ftrace: remove ad-hoc code

Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
Signed-off-by: default avatarThomas Gleixner <tglx@linutronix.de>
parent d05cdb25
...@@ -2412,53 +2412,6 @@ static int sched_balance_self(int cpu, int flag)
#endif /* CONFIG_SMP */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Emit one trace entry for a single task via __trace_special().
 *
 * @p:      task to record
 * @__tr:   opaque tracer pointer (presumably struct trace_array; the
 *          void* type lets sched.c avoid including tracer headers —
 *          confirm against the trace_sched callers)
 * @__data: opaque per-cpu tracer data, same caveat as @__tr
 *
 * Two mutually exclusive variants are selected at compile time with
 * #if 0 / #else; only the #else branch is ever built here.
 */
void ftrace_task(struct task_struct *p, void *__tr, void *__data)
{
#if 0
/*
 * Dead variant (compiled out): would record the task's CFS timeline
 * position — vruntime and total runtime.
 */
__trace_special(__tr, __data,
p->pid, p->se.vruntime, p->se.sum_exec_runtime);
#else
/*
 * Live variant: record the task's wakeup-overlap average, used as a
 * balance metric; third field is unused (0).
 */
__trace_special(__tr, __data,
p->pid, p->se.avg_overlap, 0);
#endif
}
/*
 * Trace every fair-class (CFS) task on a runqueue.
 *
 * @__rq:   runqueue to walk (void* so callers outside sched.c need no
 *          struct rq definition; cast back immediately below)
 * @__tr:   opaque tracer pointer, passed through to ftrace_task()
 * @__data: opaque per-cpu tracer data, passed through to ftrace_task()
 *
 * Emits one entry (via ftrace_task) for the currently running CFS task,
 * the cached "next" task, and each task in the cfs rbtree. NOTE(review):
 * rq->cfs.curr / rq->cfs.next may also appear in the rbtree walk, so a
 * task can be traced twice — presumably acceptable for debug output.
 */
void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
{
struct task_struct *p;
struct sched_entity *se;
struct rb_node *curr;
struct rq *rq = __rq;
/* Currently running CFS task, if any. */
if (rq->cfs.curr) {
p = task_of(rq->cfs.curr);
ftrace_task(p, __tr, __data);
}
/* Buddy/next hint, if set. */
if (rq->cfs.next) {
p = task_of(rq->cfs.next);
ftrace_task(p, __tr, __data);
}
/*
 * Walk the timeline rbtree left-to-right; skip group scheduling
 * entities, which do not correspond to a task.
 */
for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
se = rb_entry(curr, struct sched_entity, run_node);
if (!entity_is_task(se))
continue;
p = task_of(se);
ftrace_task(p, __tr, __data);
}
}
#endif
/***
 * try_to_wake_up - wake up a thread
 * @p: the to-be-woken-up thread
......
...@@ -1061,8 +1061,6 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
if (!(this_sd->flags & SD_WAKE_AFFINE))
return 0;
ftrace_special(__LINE__, curr->se.avg_overlap, sync);
ftrace_special(__LINE__, p->se.avg_overlap, -1);
/*
 * If the currently running task will sleep within
 * a reasonable amount of time then attract this newly
...@@ -1240,7 +1238,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
if (unlikely(se == pse))
return;
ftrace_special(__LINE__, p->pid, se->last_wakeup);
cfs_rq_of(pse)->next = pse;
/*
......
...@@ -36,11 +36,8 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) { if (likely(disabled == 1))
tracing_sched_switch_trace(tr, data, prev, next, flags);
if (trace_flags & TRACE_ITER_SCHED_TREE)
ftrace_all_fair_tasks(__rq, tr, data);
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
...@@ -65,11 +62,8 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) { if (likely(disabled == 1))
tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
if (trace_flags & TRACE_ITER_SCHED_TREE)
ftrace_all_fair_tasks(__rq, tr, data);
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment