Commit 96e6122c authored by Zheng Yejian's avatar Zheng Yejian Committed by Steven Rostedt (Google)

tracing: Optimize event type allocation with IDA

After commit 060fa5c8 ("tracing/events: reuse trace event ids after
overflow"), trace events with dynamic type are linked up in the list
'ftrace_event_list' through the field 'trace_event.list'. Then, when the
max event type number is used up, it is possible to reuse the type number
of some freed event by traversing 'ftrace_event_list'.

Instead, using an IDA to manage the available type numbers makes the code
simpler, and the field 'trace_event.list' can then be dropped.

Since 'struct trace_event' is used in static tracepoints, dropping
'trace_event.list' makes vmlinux smaller. A local test with about 2000
tracepoints showed vmlinux reduced by about 64KB:
  before:-rwxrwxr-x 1 root root 76669448 Nov  8 17:14 vmlinux
  after: -rwxrwxr-x 1 root root 76604176 Nov  8 17:15 vmlinux

Link: https://lkml.kernel.org/r/20221110020319.1259291-1-zhengyejian1@huawei.com
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
parent a76d4648
......@@ -136,7 +136,6 @@ struct trace_event_functions {
/*
 * A registered trace event, hashed by 'type' in the output-event hash
 * table ('node').  'type' is the event's unique id; dynamic ids are now
 * handed out by an IDA, so the old 'list' member that chained events on
 * 'ftrace_event_list' for id reuse is gone — shrinking every statically
 * defined tracepoint.  'funcs' holds the per-event print callbacks.
 */
struct trace_event {
	struct hlist_node node;
	int type;
	struct trace_event_functions *funcs;
};
......
......@@ -11,6 +11,7 @@
#include <linux/kprobes.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/idr.h>
#include "trace_output.h"
......@@ -21,8 +22,6 @@ DECLARE_RWSEM(trace_event_sem);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
static int next_event_type = __TRACE_LAST_TYPE;
enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
......@@ -688,38 +687,23 @@ struct trace_event *ftrace_find_event(int type)
return NULL;
}
static LIST_HEAD(ftrace_event_list);
static DEFINE_IDA(trace_event_ida);
static int trace_search_list(struct list_head **list)
static void free_trace_event_type(int type)
{
struct trace_event *e = NULL, *iter;
int next = __TRACE_LAST_TYPE;
if (list_empty(&ftrace_event_list)) {
*list = &ftrace_event_list;
return next;
}
if (type >= __TRACE_LAST_TYPE)
ida_free(&trace_event_ida, type);
}
/*
 * Allocate a free dynamic event type id from the IDA.
 *
 * Ids below __TRACE_LAST_TYPE are reserved for statically defined trace
 * events, so allocation starts there and is capped at TRACE_EVENT_TYPE_MAX.
 * Returns the new id on success, or 0 when no id is available (0 is never
 * a valid dynamic type, so callers treat it as failure).
 */
static int alloc_trace_event_type(void)
{
	int next;

	/* Skip static defined type numbers */
	next = ida_alloc_range(&trace_event_ida, __TRACE_LAST_TYPE,
			       TRACE_EVENT_TYPE_MAX, GFP_KERNEL);
	if (next < 0)
		return 0;

	return next;
}
......@@ -761,28 +745,10 @@ int register_trace_event(struct trace_event *event)
if (WARN_ON(!event->funcs))
goto out;
INIT_LIST_HEAD(&event->list);
if (!event->type) {
struct list_head *list = NULL;
if (next_event_type > TRACE_EVENT_TYPE_MAX) {
event->type = trace_search_list(&list);
event->type = alloc_trace_event_type();
if (!event->type)
goto out;
} else {
event->type = next_event_type++;
list = &ftrace_event_list;
}
if (WARN_ON(ftrace_find_event(event->type)))
goto out;
list_add_tail(&event->list, list);
} else if (WARN(event->type > __TRACE_LAST_TYPE,
"Need to add type to trace.h")) {
goto out;
......@@ -819,7 +785,7 @@ EXPORT_SYMBOL_GPL(register_trace_event);
/*
 * Remove 'event' from the event hash table and return its dynamically
 * allocated type id (if any) to the IDA via free_trace_event_type().
 * NOTE(review): presumably called with trace_event_sem held for writing,
 * like register_trace_event() — confirm against callers.
 * Always returns 0.
 */
int __unregister_trace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	free_trace_event_type(event->type);

	return 0;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment