Commit 3f4ac23a authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf dsos: Switch backing storage to array from rbtree/list

DSOs were held on a list for fast iteration and in an rbtree for fast
finds.

Switch to using a lazily sorted array, where iteration is a simple walk
over the array and binary searches have the same complexity as searching
the rbtree.

A find may need to sort the array first, which increases its complexity,
but add operations have lower complexity, so the overall complexity
should remain about the same.
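
For illustration only (not part of the patch), a minimal sketch of such a
lazily sorted array, using hypothetical 'entry'/'entries' names, plain
strings instead of 'struct dso', and no locking or reference counting:

  /* Lazily sorted pointer array: cheap appends, sort-on-demand finds. */
  #include <stdbool.h>
  #include <stdlib.h>
  #include <string.h>

  struct entry { char *name; };

  struct entries {
          struct entry **items;
          unsigned int cnt, allocated;
          bool sorted;    /* cleared when the order may have been broken */
  };

  static int cmp_entry(const void *va, const void *vb)
  {
          const struct entry *a = *(const struct entry **)va;
          const struct entry *b = *(const struct entry **)vb;

          return strcmp(a->name, b->name);
  }

  /* Append in amortized O(1); only clear 'sorted' if the new tail breaks order. */
  static int entries__add(struct entries *es, struct entry *e)
  {
          if (es->cnt == es->allocated) {
                  unsigned int to_alloc = es->allocated ? es->allocated * 2 : 2;
                  struct entry **tmp = realloc(es->items, to_alloc * sizeof(*tmp));

                  if (!tmp)
                          return -1;
                  es->items = tmp;
                  es->allocated = to_alloc;
          }
          es->items[es->cnt++] = e;
          if (es->cnt >= 2 && es->sorted)
                  es->sorted = cmp_entry(&es->items[es->cnt - 2],
                                         &es->items[es->cnt - 1]) <= 0;
          return 0;
  }

  /* Sort on demand, then binary search: O(n log n) once, O(log n) afterwards. */
  static struct entry *entries__find(struct entries *es, const char *name)
  {
          int low = 0, high = (int)es->cnt - 1;

          if (!es->sorted) {
                  qsort(es->items, es->cnt, sizeof(*es->items), cmp_entry);
                  es->sorted = true;
          }
          while (low <= high) {
                  int mid = (low + high) / 2;
                  int rc = strcmp(name, es->items[mid]->name);

                  if (rc == 0)
                          return es->items[mid];
                  if (rc < 0)
                          high = mid - 1;
                  else
                          low = mid + 1;
          }
          return NULL;
  }

Appends stay amortized O(1) and the sort cost is only paid by the next
find, which is the behaviour the patch gives the dsos array.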

The set-name operations on a dso now just record that the array is no
longer sorted, avoiding the complexity of rebalancing the rbtree.

Tighter locking discipline is enforced to avoid the array being resorted
while long and short names or ids are changed.
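
To make the lock discipline concrete, here is a small standalone sketch
using pthread_rwlock_t in place of perf's rw_semaphore (illustrative
names, not part of the patch): renames hold the write lock and mark the
array unsorted, while a find that enters under the read lock must drop
it and take the write lock before it may sort.

  #include <pthread.h>
  #include <stdbool.h>

  struct table {
          pthread_rwlock_t lock;
          bool sorted;
          /* ... backing array as in the previous sketch ... */
  };

  /* Renames hold the write lock so a concurrent find cannot re-sort the
   * array while a name is only half updated; afterwards the table is
   * simply marked unsorted instead of being rebalanced. */
  static void table__rename(struct table *t /* , new name ... */)
  {
          pthread_rwlock_wrlock(&t->lock);
          /* swap the name fields here */
          t->sorted = false;
          pthread_rwlock_unlock(&t->lock);
  }

  /* A find entered under the read lock cannot sort in place: pthreads has
   * no atomic upgrade, so drop the read lock, take the write lock,
   * re-check 'sorted' and sort, then downgrade back to the read lock,
   * mirroring the up_read()/down_write() dance in the patch. */
  static void table__find_prepare(struct table *t)
  {
          pthread_rwlock_rdlock(&t->lock);
          if (!t->sorted) {
                  pthread_rwlock_unlock(&t->lock);
                  pthread_rwlock_wrlock(&t->lock);
                  if (!t->sorted) {
                          /* qsort the array here */
                          t->sorted = true;
                  }
                  pthread_rwlock_unlock(&t->lock);
                  pthread_rwlock_rdlock(&t->lock);
          }
          /* ... binary search under the read lock ... */
          pthread_rwlock_unlock(&t->lock);
  }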

The per-dso bookkeeping is also smaller, replacing 6 pointers with 2 (the
list_head, rb_node and rb_root pointer become a single back-pointer to
the dsos plus one slot in the array), so even with extra allocated space
leaving the array up to 50% unoccupied, the memory saving should be at
least 2x.

Committer testing:

On a previous version of this patchset we were getting a lot of warnings
about deleting a DSO that was still on a list; now it is ok:

  root@x1:~# perf probe -l
  root@x1:~# perf probe finish_task_switch
  Added new event:
    probe:finish_task_switch (on finish_task_switch)

  You can now use it in all perf tools, such as:

  	perf record -e probe:finish_task_switch -aR sleep 1

  root@x1:~# perf probe -l
    probe:finish_task_switch (on finish_task_switch@kernel/sched/core.c)
  root@x1:~# perf trace -e probe:finish_task_switch/max-stack=8/ --max-events=1
       0.000 migration/0/19 probe:finish_task_switch(__probe_ip: -1894408688)
                                         finish_task_switch.isra.0 ([kernel.kallsyms])
                                         __schedule ([kernel.kallsyms])
                                         schedule ([kernel.kallsyms])
                                         smpboot_thread_fn ([kernel.kallsyms])
                                         kthread ([kernel.kallsyms])
                                         ret_from_fork ([kernel.kallsyms])
                                         ret_from_fork_asm ([kernel.kallsyms])
  root@x1:~#
  root@x1:~# perf probe -d probe:*
  Removed event: probe:finish_task_switch
  root@x1:~# perf probe -l
  root@x1:~#

I also ran the full 'perf test' suite after applying this one; no
regressions.
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Ben Gainey <ben.gainey@arm.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Chengen Du <chengen.du@canonical.com>
Cc: Colin Ian King <colin.i.king@gmail.com>
Cc: Dima Kogan <dima@secretsauce.net>
Cc: Ilkka Koskinen <ilkka@os.amperecomputing.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linux.dev>
Cc: Li Dong <lidong@vivo.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paran Lee <p4ranlee@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <song@kernel.org>
Cc: Sun Haiyong <sunhaiyong@loongson.cn>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Tiezhu Yang <yangtiezhu@loongson.cn>
Cc: Yanteng Si <siyanteng@loongson.cn>
Cc: zhaimingbing <zhaimingbing@cmss.chinamobile.com>
Link: https://lore.kernel.org/r/20240504213803.218974-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 77a70f80
@@ -1241,35 +1241,35 @@ struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
 	return dso;
 }
 
-static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
+static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
 {
-	struct rb_root *root = dso->root;
+	struct dsos *dsos = dso->dsos;
 
 	if (name == NULL)
 		return;
 
-	if (dso->long_name_allocated)
-		free((char *)dso->long_name);
-
-	if (root) {
-		rb_erase(&dso->rb_node, root);
+	if (dsos) {
 		/*
-		 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
-		 * add it back, so a clean removal is required here.
+		 * Need to avoid re-sorting the dsos breaking by non-atomically
+		 * renaming the dso.
 		 */
-		RB_CLEAR_NODE(&dso->rb_node);
-		dso->root = NULL;
+		down_write(&dsos->lock);
 	}
 
+	if (dso->long_name_allocated)
+		free((char *)dso->long_name);
+
 	dso->long_name = name;
 	dso->long_name_len = strlen(name);
 	dso->long_name_allocated = name_allocated;
 
-	if (root)
-		__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
+	if (dsos) {
+		dsos->sorted = false;
+		up_write(&dsos->lock);
+	}
 }
 
-static int __dso_id__cmp(struct dso_id *a, struct dso_id *b)
+static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
 {
 	if (a->maj > b->maj) return -1;
 	if (a->maj < b->maj) return 1;
@@ -1297,7 +1297,7 @@ static int __dso_id__cmp(struct dso_id *a, struct dso_id *b)
 	return 0;
 }
 
-bool dso_id__empty(struct dso_id *id)
+bool dso_id__empty(const struct dso_id *id)
 {
 	if (!id)
 		return true;
@@ -1305,15 +1305,22 @@ bool dso_id__empty(struct dso_id *id)
 	return !id->maj && !id->min && !id->ino && !id->ino_generation;
 }
 
-void dso__inject_id(struct dso *dso, struct dso_id *id)
+void __dso__inject_id(struct dso *dso, struct dso_id *id)
 {
+	struct dsos *dsos = dso->dsos;
+
+	/* dsos write lock held by caller. */
+
 	dso->id.maj = id->maj;
 	dso->id.min = id->min;
 	dso->id.ino = id->ino;
 	dso->id.ino_generation = id->ino_generation;
+
+	if (dsos)
+		dsos->sorted = false;
 }
 
-int dso_id__cmp(struct dso_id *a, struct dso_id *b)
+int dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
 {
 	/*
 	 * The second is always dso->id, so zeroes if not set, assume passing
@@ -1332,20 +1339,34 @@ int dso__cmp_id(struct dso *a, struct dso *b)
 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
 {
-	dso__set_long_name_id(dso, name, NULL, name_allocated);
+	dso__set_long_name_id(dso, name, name_allocated);
 }
 
 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
 {
+	struct dsos *dsos = dso->dsos;
+
 	if (name == NULL)
 		return;
 
+	if (dsos) {
+		/*
+		 * Need to avoid re-sorting the dsos breaking by non-atomically
+		 * renaming the dso.
+		 */
+		down_write(&dsos->lock);
+	}
 	if (dso->short_name_allocated)
 		free((char *)dso->short_name);
 
 	dso->short_name = name;
 	dso->short_name_len = strlen(name);
 	dso->short_name_allocated = name_allocated;
+
+	if (dsos) {
+		dsos->sorted = false;
+		up_write(&dsos->lock);
+	}
 }
 
 int dso__name_len(const struct dso *dso)
@@ -1381,7 +1402,7 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
 	strcpy(dso->name, name);
 	if (id)
 		dso->id = *id;
-	dso__set_long_name_id(dso, dso->name, id, false);
+	dso__set_long_name_id(dso, dso->name, false);
 	dso__set_short_name(dso, dso->name, false);
 	dso->symbols = RB_ROOT_CACHED;
 	dso->symbol_names = NULL;
@@ -1406,9 +1427,6 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
 		dso->is_kmod = 0;
 		dso->needs_swap = DSO_SWAP__UNSET;
 		dso->comp = COMP_ID__NONE;
-		RB_CLEAR_NODE(&dso->rb_node);
-		dso->root = NULL;
-		INIT_LIST_HEAD(&dso->node);
 		INIT_LIST_HEAD(&dso->data.open_entry);
 		mutex_init(&dso->lock);
 		refcount_set(&dso->refcnt, 1);
@@ -1424,9 +1442,8 @@ struct dso *dso__new(const char *name)
 void dso__delete(struct dso *dso)
 {
-	if (!RB_EMPTY_NODE(&dso->rb_node))
-		pr_err("DSO %s is still in rbtree when being deleted!\n",
-		       dso->long_name);
+	if (dso->dsos)
+		pr_err("DSO %s is still in rbtree when being deleted!\n", dso->long_name);
 
 	/* free inlines first, as they reference symbols */
 	inlines__tree_delete(&dso->inlined_nodes);
...
@@ -146,9 +146,7 @@ struct auxtrace_cache;
 struct dso {
 	struct mutex lock;
-	struct list_head node;
-	struct rb_node rb_node;	/* rbtree node sorted by long name */
-	struct rb_root *root;	/* root of rbtree that rb_node is in */
+	struct dsos *dsos;
 	struct rb_root_cached symbols;
 	struct symbol **symbol_names;
 	size_t symbol_names_len;
@@ -238,8 +236,8 @@ static inline void dso__set_loaded(struct dso *dso)
 	dso->loaded = true;
 }
 
-int dso_id__cmp(struct dso_id *a, struct dso_id *b);
-bool dso_id__empty(struct dso_id *id);
+int dso_id__cmp(const struct dso_id *a, const struct dso_id *b);
+bool dso_id__empty(const struct dso_id *id);
 
 struct dso *dso__new_id(const char *name, struct dso_id *id);
 struct dso *dso__new(const char *name);
@@ -248,7 +246,7 @@ void dso__delete(struct dso *dso);
 int dso__cmp_id(struct dso *a, struct dso *b);
 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated);
 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
-void dso__inject_id(struct dso *dso, struct dso_id *id);
+void __dso__inject_id(struct dso *dso, struct dso_id *id);
 
 int dso__name_len(const struct dso *dso);
...
@@ -14,24 +14,30 @@
 void dsos__init(struct dsos *dsos)
 {
-	INIT_LIST_HEAD(&dsos->head);
-	dsos->root = RB_ROOT;
 	init_rwsem(&dsos->lock);
+
+	dsos->cnt = 0;
+	dsos->allocated = 0;
+	dsos->dsos = NULL;
+	dsos->sorted = true;
 }
 
 static void dsos__purge(struct dsos *dsos)
 {
-	struct dso *pos, *n;
-
 	down_write(&dsos->lock);
 
-	list_for_each_entry_safe(pos, n, &dsos->head, node) {
-		RB_CLEAR_NODE(&pos->rb_node);
-		pos->root = NULL;
-		list_del_init(&pos->node);
-		dso__put(pos);
+	for (unsigned int i = 0; i < dsos->cnt; i++) {
+		struct dso *dso = dsos->dsos[i];
+
+		dso->dsos = NULL;
+		dso__put(dso);
 	}
 
+	zfree(&dsos->dsos);
+	dsos->cnt = 0;
+	dsos->allocated = 0;
+	dsos->sorted = true;
+
 	up_write(&dsos->lock);
 }
@@ -46,9 +52,8 @@ static int __dsos__for_each_dso(struct dsos *dsos,
 			      int (*cb)(struct dso *dso, void *data),
 			      void *data)
 {
-	struct dso *dso;
-
-	list_for_each_entry(dso, &dsos->head, node) {
+	for (unsigned int i = 0; i < dsos->cnt; i++) {
+		struct dso *dso = dsos->dsos[i];
 		int err;
 
 		err = cb(dso, data);
@@ -119,16 +124,47 @@ static int dso__cmp_short_name(struct dso *a, struct dso *b)
 	return __dso__cmp_short_name(a->short_name, &a->id, b);
 }
 
+static int dsos__cmp_long_name_id_short_name(const void *va, const void *vb)
+{
+	const struct dso *a = *((const struct dso **)va);
+	const struct dso *b = *((const struct dso **)vb);
+	int rc = strcmp(a->long_name, b->long_name);
+
+	if (!rc) {
+		rc = dso_id__cmp(&a->id, &b->id);
+		if (!rc)
+			rc = strcmp(a->short_name, b->short_name);
+	}
+	return rc;
+}
+
 /*
  * Find a matching entry and/or link current entry to RB tree.
  * Either one of the dso or name parameter must be non-NULL or the
  * function will not work.
  */
-struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso,
-						const char *name, struct dso_id *id)
+struct dso *__dsos__findnew_link_by_longname_id(struct dsos *dsos,
+						struct dso *dso,
+						const char *name,
+						struct dso_id *id,
+						bool write_locked)
 {
-	struct rb_node **p = &root->rb_node;
-	struct rb_node *parent = NULL;
+	int low = 0, high = dsos->cnt - 1;
+
+	if (!dsos->sorted) {
+		if (!write_locked) {
+			up_read(&dsos->lock);
+			down_write(&dsos->lock);
+			dso = __dsos__findnew_link_by_longname_id(dsos, dso, name, id,
+								  /*write_locked=*/true);
+			up_write(&dsos->lock);
+			down_read(&dsos->lock);
+			return dso;
+		}
+		qsort(dsos->dsos, dsos->cnt, sizeof(struct dso *),
+		      dsos__cmp_long_name_id_short_name);
+		dsos->sorted = true;
+	}
 
 	if (!name)
 		name = dso->long_name;
@@ -136,11 +172,11 @@ struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso
 	/*
 	 * Find node with the matching name
 	 */
-	while (*p) {
-		struct dso *this = rb_entry(*p, struct dso, rb_node);
+	while (low <= high) {
+		int mid = (low + high) / 2;
+		struct dso *this = dsos->dsos[mid];
 		int rc = __dso__cmp_long_name(name, id, this);
 
-		parent = *p;
 		if (rc == 0) {
 			/*
 			 * In case the new DSO is a duplicate of an existing
@@ -161,56 +197,53 @@ struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso
 			}
 		}
 		if (rc < 0)
-			p = &parent->rb_left;
+			high = mid - 1;
 		else
-			p = &parent->rb_right;
-	}
-	if (dso) {
-		/* Add new node and rebalance tree */
-		rb_link_node(&dso->rb_node, parent, p);
-		rb_insert_color(&dso->rb_node, root);
-		dso->root = root;
+			low = mid + 1;
 	}
+	if (dso)
+		__dsos__add(dsos, dso);
 	return NULL;
 }
 
-void __dsos__add(struct dsos *dsos, struct dso *dso)
+int __dsos__add(struct dsos *dsos, struct dso *dso)
 {
-	list_add_tail(&dso->node, &dsos->head);
-	__dsos__findnew_link_by_longname_id(&dsos->root, dso, NULL, &dso->id);
-	/*
-	 * It is now in the linked list, grab a reference, then garbage collect
-	 * this when needing memory, by looking at LRU dso instances in the
-	 * list with atomic_read(&dso->refcnt) == 1, i.e. no references
-	 * anywhere besides the one for the list, do, under a lock for the
-	 * list: remove it from the list, then a dso__put(), that probably will
-	 * be the last and will then call dso__delete(), end of life.
-	 *
-	 * That, or at the end of the 'struct machine' lifetime, when all
-	 * 'struct dso' instances will be removed from the list, in
-	 * dsos__exit(), if they have no other reference from some other data
-	 * structure.
-	 *
-	 * E.g.: after processing a 'perf.data' file and storing references
-	 * to objects instantiated while processing events, we will have
-	 * references to the 'thread', 'map', 'dso' structs all from 'struct
-	 * hist_entry' instances, but we may not need anything not referenced,
-	 * so we might as well call machines__exit()/machines__delete() and
-	 * garbage collect it.
-	 */
-	dso__get(dso);
+	if (dsos->cnt == dsos->allocated) {
+		unsigned int to_allocate = 2;
+		struct dso **temp;
+
+		if (dsos->allocated > 0)
+			to_allocate = dsos->allocated * 2;
+		temp = realloc(dsos->dsos, sizeof(struct dso *) * to_allocate);
+		if (!temp)
+			return -ENOMEM;
+		dsos->dsos = temp;
+		dsos->allocated = to_allocate;
+	}
+	dsos->dsos[dsos->cnt++] = dso__get(dso);
+	if (dsos->cnt >= 2 && dsos->sorted) {
+		dsos->sorted = dsos__cmp_long_name_id_short_name(&dsos->dsos[dsos->cnt - 2],
								 &dsos->dsos[dsos->cnt - 1])
			<= 0;
+	}
+	dso->dsos = dsos;
+	return 0;
 }
 
-void dsos__add(struct dsos *dsos, struct dso *dso)
+int dsos__add(struct dsos *dsos, struct dso *dso)
 {
+	int ret;
+
 	down_write(&dsos->lock);
-	__dsos__add(dsos, dso);
+	ret = __dsos__add(dsos, dso);
 	up_write(&dsos->lock);
+	return ret;
 }
 
-static struct dso *__dsos__findnew_by_longname_id(struct rb_root *root, const char *name, struct dso_id *id)
+static struct dso *__dsos__findnew_by_longname_id(struct dsos *dsos, const char *name,
						  struct dso_id *id, bool write_locked)
 {
-	return __dsos__findnew_link_by_longname_id(root, NULL, name, id);
+	return __dsos__findnew_link_by_longname_id(dsos, NULL, name, id, write_locked);
 }
 
 struct dsos__find_id_cb_args {
@@ -231,7 +264,8 @@ static int dsos__find_id_cb(struct dso *dso, void *data)
 }
 
-static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id, bool cmp_short)
+static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id,
+				   bool cmp_short, bool write_locked)
 {
 	struct dso *res;
 
@@ -245,7 +279,7 @@ static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct d
 		__dsos__for_each_dso(dsos, dsos__find_id_cb, &args);
 		return args.res;
 	}
-	res = __dsos__findnew_by_longname_id(&dsos->root, name, id);
+	res = __dsos__findnew_by_longname_id(dsos, name, id, write_locked);
 	return res;
 }
@@ -254,7 +288,7 @@ struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
 	struct dso *res;
 
 	down_read(&dsos->lock);
-	res = __dsos__find_id(dsos, name, NULL, cmp_short);
+	res = __dsos__find_id(dsos, name, NULL, cmp_short, /*write_locked=*/false);
 	up_read(&dsos->lock);
 	return res;
 }
@@ -296,8 +330,13 @@ static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, struct
 	struct dso *dso = dso__new_id(name, id);
 
 	if (dso != NULL) {
-		__dsos__add(dsos, dso);
+		/*
+		 * The dsos lock is held on entry, so rename the dso before
+		 * adding it to avoid needing to take the dsos lock again to say
+		 * the array isn't sorted.
+		 */
 		dso__set_basename(dso);
+		__dsos__add(dsos, dso);
 	}
 	return dso;
 }
@@ -309,10 +348,10 @@ struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
 static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
 {
-	struct dso *dso = __dsos__find_id(dsos, name, id, false);
+	struct dso *dso = __dsos__find_id(dsos, name, id, false, /*write_locked=*/true);
 
 	if (dso && dso_id__empty(&dso->id) && !dso_id__empty(id))
-		dso__inject_id(dso, id);
+		__dso__inject_id(dso, id);
 
 	return dso ? dso : __dsos__addnew_id(dsos, name, id);
 }
@@ -403,18 +442,27 @@ struct dso *dsos__findnew_module_dso(struct dsos *dsos,
 	down_write(&dsos->lock);
 
-	dso = __dsos__find_id(dsos, m->name, NULL, /*cmp_short=*/true);
-	if (!dso) {
-		dso = __dsos__addnew(dsos, m->name);
-		if (dso == NULL)
-			goto out_unlock;
+	dso = __dsos__find_id(dsos, m->name, NULL, /*cmp_short=*/true, /*write_locked=*/true);
+	if (dso) {
+		up_write(&dsos->lock);
+		return dso;
+	}
+	/*
+	 * Failed to find the dso so create it. Change the name before adding it
+	 * to the array, to avoid unnecessary sorts and potential locking
+	 * issues.
+	 */
+	dso = dso__new_id(m->name, /*id=*/NULL);
+	if (!dso) {
+		up_write(&dsos->lock);
+		return NULL;
+	}
 
-		dso__set_module_info(dso, m, machine);
-		dso__set_long_name(dso, strdup(filename), true);
-		dso->kernel = DSO_SPACE__KERNEL;
-	}
+	dso__set_basename(dso);
+	dso__set_module_info(dso, m, machine);
+	dso__set_long_name(dso, strdup(filename), true);
+	dso->kernel = DSO_SPACE__KERNEL;
+	__dsos__add(dsos, dso);
 
-out_unlock:
 	up_write(&dsos->lock);
 	return dso;
 }
...
@@ -14,20 +14,22 @@ struct kmod_path;
 struct machine;
 
 /*
- * DSOs are put into both a list for fast iteration and rbtree for fast
- * long name lookup.
+ * Collection of DSOs as an array for iteration speed, but sorted for O(n)
+ * lookup.
  */
 struct dsos {
-	struct list_head head;
-	struct rb_root root;	/* rbtree root sorted by long name */
	struct rw_semaphore lock;
+	struct dso **dsos;
+	unsigned int cnt;
+	unsigned int allocated;
+	bool sorted;
 };
 
 void dsos__init(struct dsos *dsos);
 void dsos__exit(struct dsos *dsos);
 
-void __dsos__add(struct dsos *dsos, struct dso *dso);
-void dsos__add(struct dsos *dsos, struct dso *dso);
+int __dsos__add(struct dsos *dsos, struct dso *dso);
+int dsos__add(struct dsos *dsos, struct dso *dso);
 struct dso *__dsos__addnew(struct dsos *dsos, const char *name);
 struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short);
@@ -35,8 +37,11 @@ struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id
 bool dsos__read_build_ids(struct dsos *dsos, bool with_hits);
 
-struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso,
-						const char *name, struct dso_id *id);
+struct dso *__dsos__findnew_link_by_longname_id(struct dsos *dsos,
+						struct dso *dso,
+						const char *name,
+						struct dso_id *id,
+						bool write_locked);
 
 size_t dsos__fprintf_buildid(struct dsos *dsos, FILE *fp,
 			     bool (skip)(struct dso *dso, int parm), int parm);
...