Commit 40826c45 authored by Ian Rogers's avatar Ian Rogers Committed by Arnaldo Carvalho de Melo

perf thread: Remove notion of dead threads

The dead thread list is best effort. Threads live on it until the
reference count hits zero and they are removed. With correct reference
counting this should never happen. It is, however, part of the 'perf
sched' output that is now removed. If this is an issue we should
implement tracking of dead threads in a robust, rather than best-effort, way.
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Brian Robbins <brianrob@linux.microsoft.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Fangrui Song <maskray@google.com>
Cc: German Gomez <german.gomez@arm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ivan Babrou <ivan@cloudflare.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Steinar H. Gunderson <sesse@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Wenyu Liu <liuwenyu7@huawei.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Ye Xingchen <ye.xingchen@zte.com.cn>
Cc: Yuan Can <yuancan@huawei.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230608232823.4027869-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent dcf7a177
......@@ -2760,7 +2760,7 @@ struct total_run_stats {
u64 total_run_time;
};
static int __show_thread_runtime(struct thread *t, void *priv)
static int show_thread_runtime(struct thread *t, void *priv)
{
struct total_run_stats *stats = priv;
struct thread_runtime *r;
......@@ -2783,22 +2783,6 @@ static int __show_thread_runtime(struct thread *t, void *priv)
return 0;
}
/*
 * Per-thread callback for machine__for_each_thread(): report runtime
 * statistics for live threads only; threads flagged as dead are ignored
 * (they are handled separately by show_deadthread_runtime()).
 */
static int show_thread_runtime(struct thread *t, void *priv)
{
	return t->dead ? 0 : __show_thread_runtime(t, priv);
}
/*
 * Per-thread callback for machine__for_each_thread(): the mirror of
 * show_thread_runtime(), reporting runtime statistics only for threads
 * that have been marked dead.
 */
static int show_deadthread_runtime(struct thread *t, void *priv)
{
	return t->dead ? __show_thread_runtime(t, priv) : 0;
}
static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
const char *sep = " <- ";
......@@ -2890,11 +2874,6 @@ static void timehist_print_summary(struct perf_sched *sched,
if (!task_count)
printf("<no still running tasks>\n");
printf("\nTerminated tasks:\n");
machine__for_each_thread(m, show_deadthread_runtime, &totals);
if (task_count == totals.task_count)
printf("<no terminated tasks>\n");
/* CPU idle stats not tracked when samples were skipped */
if (sched->skipped_samples && !sched->idle_hist)
return;
......
......@@ -3292,12 +3292,6 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
goto err_free_queues;
}
/*
* Initialize list node so that at thread__zput() we can avoid
* segmentation fault at list_del_init().
*/
INIT_LIST_HEAD(&etm->unknown_thread->node);
err = thread__set_comm(etm->unknown_thread, "unknown", 0);
if (err)
goto err_delete_thread;
......
......@@ -4311,14 +4311,6 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
goto err_free_queues;
}
/*
* Since this thread will not be kept in any rbtree not in a
* list, initialize its list node so that at thread__put() the
* current thread lifetime assumption is kept and we don't segfault
* at list_del_init().
*/
INIT_LIST_HEAD(&pt->unknown_thread->node);
err = thread__set_comm(pt->unknown_thread, "unknown", 0);
if (err)
goto err_delete_thread;
......
......@@ -241,17 +241,6 @@ void machine__exit(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
struct thread *thread, *n;
/*
* Forget about the dead, at this point whatever threads were
* left in the dead lists better have a reference count taken
* by who is using them, and then, when they drop those references
* and it finally hits zero, thread__put() will check and see that
* its not in the dead threads list and will not try to remove it
* from there, just calling thread__delete() straight away.
*/
list_for_each_entry_safe(thread, n, &threads->dead, node)
list_del_init(&thread->node);
exit_rwsem(&threads->lock);
}
......@@ -2046,18 +2035,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
--threads->nr;
/*
* Move it first to the dead_threads list, then drop the reference,
* if this is the last reference, then the thread__delete destructor
* will be called and we will remove it from the dead_threads list.
*/
list_add_tail(&th->node, &threads->dead);
/*
* We need to do the put here because if this is the last refcount,
* then we will be touching the threads->dead head when removing the
* thread.
*/
thread__put(th);
if (lock)
......@@ -2145,10 +2123,8 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
if (dump_trace)
perf_event__fprintf_task(event, stdout);
if (thread != NULL) {
thread__exited(thread);
if (thread != NULL)
thread__put(thread);
}
return 0;
}
......@@ -3204,12 +3180,6 @@ int machine__for_each_thread(struct machine *machine,
if (rc != 0)
return rc;
}
list_for_each_entry(thread, &threads->dead, node) {
rc = fn(thread, priv);
if (rc != 0)
return rc;
}
}
return rc;
}
......
......@@ -125,31 +125,8 @@ struct thread *thread__get(struct thread *thread)
void thread__put(struct thread *thread)
{
if (thread && refcount_dec_and_test(&thread->refcnt)) {
/*
* Remove it from the dead threads list, as last reference is
* gone, if it is in a dead threads list.
*
* We may not be there anymore if say, the machine where it was
* stored was already deleted, so we already removed it from
* the dead threads and some other piece of code still keeps a
* reference.
*
* This is what 'perf sched' does and finally drops it in
* perf_sched__lat(), where it calls perf_sched__read_events(),
* that processes the events by creating a session and deleting
* it, which ends up destroying the list heads for the dead
* threads, but before it does that it removes all threads from
* it using list_del_init().
*
* So we need to check here if it is in a dead threads list and
* if so, remove it before finally deleting the thread, to avoid
* an use after free situation.
*/
if (!list_empty(&thread->node))
list_del_init(&thread->node);
if (thread && refcount_dec_and_test(&thread->refcnt))
thread__delete(thread);
}
}
static struct namespaces *__thread__namespaces(const struct thread *thread)
......
......@@ -30,10 +30,7 @@ struct lbr_stitch {
};
struct thread {
union {
struct rb_node rb_node;
struct list_head node;
};
struct rb_node rb_node;
struct maps *maps;
pid_t pid_; /* Not all tools update this */
pid_t tid;
......@@ -43,7 +40,6 @@ struct thread {
refcount_t refcnt;
bool comm_set;
int comm_len;
bool dead; /* if set thread has exited */
struct list_head namespaces_list;
struct rw_semaphore namespaces_lock;
struct list_head comm_list;
......@@ -81,11 +77,6 @@ static inline void __thread__zput(struct thread **thread)
#define thread__zput(thread) __thread__zput(&thread)
/*
 * Mark the thread as having exited. Callers (e.g. the PERF_RECORD_EXIT
 * handler) use this flag to segregate live and dead threads in reports.
 */
static inline void thread__exited(struct thread *thread)
{
thread->dead = true;
}
struct namespaces *thread__namespaces(struct thread *thread);
int thread__set_namespaces(struct thread *thread, u64 timestamp,
struct perf_record_namespaces *event);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment