Commit ff583dc4 authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf maps: Remove rb_node from struct map

struct map is reference counted; having it also be a node in a
red-black tree complicates the reference counting. Switch to having a
map_rb_node, which is a red-black tree node that points at the
reference-counted struct map. This reference is responsible for a
single reference count.
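
For reference, the new wrapper and iteration helpers added to
tools/perf/util/maps.h by this patch look like:

    /* rb-tree node now wraps the reference-counted map */
    struct map_rb_node {
    	struct rb_node rb_node;
    	struct map *map;
    };

    struct map_rb_node *maps__first(struct maps *maps);
    struct map_rb_node *map_rb_node__next(struct map_rb_node *node);
    struct map_rb_node *maps__find_node(struct maps *maps, struct map *map);

    /* iteration walks map_rb_node entries instead of struct map */
    #define maps__for_each_entry(maps, map) \
    	for (map = maps__first(maps); map; map = map_rb_node__next(map))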

Committer notes:

Fixed up tools/perf/util/unwind-libunwind-local.c to use map_rb_node as
well.
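
Because the tree node is now allocated separately, maps__insert() can
fail and therefore returns int; callers are updated to check the
result, for example in machine__process_ksymbol_register():

    err = maps__insert(machine__kernel_maps(machine), map);
    map__put(map);
    if (err)
    	return err;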

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: German Gomez <german.gomez@arm.com>
Cc: Hao Luo <haoluo@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Miaoqian Lin <linmq006@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Shunsuke Nakamura <nakamura.shun@fujitsu.com>
Cc: Song Liu <song@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Stephen Brennan <stephen.s.brennan@oracle.com>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Yury Norov <yury.norov@gmail.com>
Link: https://lore.kernel.org/r/20230320212248.1175731-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 83720209
...@@ -19,7 +19,7 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, ...@@ -19,7 +19,7 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
struct machine *machine) struct machine *machine)
{ {
int rc = 0; int rc = 0;
struct map *pos; struct map_rb_node *pos;
struct maps *kmaps = machine__kernel_maps(machine); struct maps *kmaps = machine__kernel_maps(machine);
union perf_event *event = zalloc(sizeof(event->mmap) + union perf_event *event = zalloc(sizeof(event->mmap) +
machine->id_hdr_size); machine->id_hdr_size);
...@@ -33,11 +33,12 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, ...@@ -33,11 +33,12 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
maps__for_each_entry(kmaps, pos) { maps__for_each_entry(kmaps, pos) {
struct kmap *kmap; struct kmap *kmap;
size_t size; size_t size;
struct map *map = pos->map;
if (!__map__is_extra_kernel_map(pos)) if (!__map__is_extra_kernel_map(map))
continue; continue;
kmap = map__kmap(pos); kmap = map__kmap(map);
size = sizeof(event->mmap) - sizeof(event->mmap.filename) + size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) + PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
...@@ -58,9 +59,9 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, ...@@ -58,9 +59,9 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
event->mmap.header.size = size; event->mmap.header.size = size;
event->mmap.start = pos->start; event->mmap.start = map->start;
event->mmap.len = pos->end - pos->start; event->mmap.len = map->end - map->start;
event->mmap.pgoff = pos->pgoff; event->mmap.pgoff = map->pgoff;
event->mmap.pid = machine->pid; event->mmap.pid = machine->pid;
strlcpy(event->mmap.filename, kmap->name, PATH_MAX); strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
......
...@@ -844,9 +844,11 @@ static struct task *tasks_list(struct task *task, struct machine *machine) ...@@ -844,9 +844,11 @@ static struct task *tasks_list(struct task *task, struct machine *machine)
static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp) static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
{ {
size_t printed = 0; size_t printed = 0;
struct map *map; struct map_rb_node *rb_node;
maps__for_each_entry(maps, rb_node) {
struct map *map = rb_node->map;
maps__for_each_entry(maps, map) {
printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n", printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
indent, "", map->start, map->end, indent, "", map->start, map->end,
map->prot & PROT_READ ? 'r' : '-', map->prot & PROT_READ ? 'r' : '-',
......
...@@ -15,10 +15,12 @@ struct map_def { ...@@ -15,10 +15,12 @@ struct map_def {
static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps) static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps)
{ {
struct map *map; struct map_rb_node *rb_node;
unsigned int i = 0; unsigned int i = 0;
maps__for_each_entry(maps, map) { maps__for_each_entry(maps, rb_node) {
struct map *map = rb_node->map;
if (i > 0) if (i > 0)
TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size)); TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size));
...@@ -74,7 +76,7 @@ static int test__maps__merge_in(struct test_suite *t __maybe_unused, int subtest ...@@ -74,7 +76,7 @@ static int test__maps__merge_in(struct test_suite *t __maybe_unused, int subtest
map->start = bpf_progs[i].start; map->start = bpf_progs[i].start;
map->end = bpf_progs[i].end; map->end = bpf_progs[i].end;
maps__insert(maps, map); TEST_ASSERT_VAL("failed to insert map", maps__insert(maps, map) == 0);
map__put(map); map__put(map);
} }
......
...@@ -118,7 +118,8 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused ...@@ -118,7 +118,8 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
int err = TEST_FAIL; int err = TEST_FAIL;
struct rb_node *nd; struct rb_node *nd;
struct symbol *sym; struct symbol *sym;
struct map *kallsyms_map, *vmlinux_map, *map; struct map *kallsyms_map, *vmlinux_map;
struct map_rb_node *rb_node;
struct machine kallsyms, vmlinux; struct machine kallsyms, vmlinux;
struct maps *maps; struct maps *maps;
u64 mem_start, mem_end; u64 mem_start, mem_end;
...@@ -290,15 +291,15 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused ...@@ -290,15 +291,15 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
header_printed = false; header_printed = false;
maps__for_each_entry(maps, map) { maps__for_each_entry(maps, rb_node) {
struct map * struct map *map = rb_node->map;
/* /*
* If it is the kernel, kallsyms is always "[kernel.kallsyms]", while * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
* the kernel will have the path for the vmlinux file being used, * the kernel will have the path for the vmlinux file being used,
* so use the short name, less descriptive but the same ("[kernel]" in * so use the short name, less descriptive but the same ("[kernel]" in
* both cases. * both cases.
*/ */
pair = maps__find_by_name(kallsyms.kmaps, (map->dso->kernel ? struct map *pair = maps__find_by_name(kallsyms.kmaps, (map->dso->kernel ?
map->dso->short_name : map->dso->short_name :
map->dso->name)); map->dso->name));
if (pair) { if (pair) {
...@@ -314,8 +315,8 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused ...@@ -314,8 +315,8 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
header_printed = false; header_printed = false;
maps__for_each_entry(maps, map) { maps__for_each_entry(maps, rb_node) {
struct map *pair; struct map *pair, *map = rb_node->map;
mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start); mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end); mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);
...@@ -344,7 +345,9 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused ...@@ -344,7 +345,9 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
maps = machine__kernel_maps(&kallsyms); maps = machine__kernel_maps(&kallsyms);
maps__for_each_entry(maps, map) { maps__for_each_entry(maps, rb_node) {
struct map *map = rb_node->map;
if (!map->priv) { if (!map->priv) {
if (!header_printed) { if (!header_printed) {
pr_info("WARN: Maps only in kallsyms:\n"); pr_info("WARN: Maps only in kallsyms:\n");
......
...@@ -284,7 +284,7 @@ int lock_contention_read(struct lock_contention *con) ...@@ -284,7 +284,7 @@ int lock_contention_read(struct lock_contention *con)
} }
/* make sure it loads the kernel map */ /* make sure it loads the kernel map */
map__load(maps__first(machine->kmaps)); map__load(maps__first(machine->kmaps)->map);
prev_key = NULL; prev_key = NULL;
while (!bpf_map_get_next_key(fd, prev_key, &key)) { while (!bpf_map_get_next_key(fd, prev_key, &key)) {
......
...@@ -883,6 +883,7 @@ static int machine__process_ksymbol_register(struct machine *machine, ...@@ -883,6 +883,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
if (!map) { if (!map) {
struct dso *dso = dso__new(event->ksymbol.name); struct dso *dso = dso__new(event->ksymbol.name);
int err;
if (dso) { if (dso) {
dso->kernel = DSO_SPACE__KERNEL; dso->kernel = DSO_SPACE__KERNEL;
...@@ -902,8 +903,11 @@ static int machine__process_ksymbol_register(struct machine *machine, ...@@ -902,8 +903,11 @@ static int machine__process_ksymbol_register(struct machine *machine,
map->start = event->ksymbol.addr; map->start = event->ksymbol.addr;
map->end = map->start + event->ksymbol.len; map->end = map->start + event->ksymbol.len;
maps__insert(machine__kernel_maps(machine), map); err = maps__insert(machine__kernel_maps(machine), map);
map__put(map); map__put(map);
if (err)
return err;
dso__set_loaded(dso); dso__set_loaded(dso);
if (is_bpf_image(event->ksymbol.name)) { if (is_bpf_image(event->ksymbol.name)) {
...@@ -1003,6 +1007,7 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start ...@@ -1003,6 +1007,7 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start
struct map *map = NULL; struct map *map = NULL;
struct kmod_path m; struct kmod_path m;
struct dso *dso; struct dso *dso;
int err;
if (kmod_path__parse_name(&m, filename)) if (kmod_path__parse_name(&m, filename))
return NULL; return NULL;
...@@ -1015,10 +1020,14 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start ...@@ -1015,10 +1020,14 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start
if (map == NULL) if (map == NULL)
goto out; goto out;
maps__insert(machine__kernel_maps(machine), map); err = maps__insert(machine__kernel_maps(machine), map);
/* Put the map here because maps__insert already got it */ /* Put the map here because maps__insert already got it */
map__put(map); map__put(map);
/* If maps__insert failed, return NULL. */
if (err)
map = NULL;
out: out:
/* put the dso here, corresponding to machine__findnew_module_dso */ /* put the dso here, corresponding to machine__findnew_module_dso */
dso__put(dso); dso__put(dso);
...@@ -1185,10 +1194,11 @@ int machine__create_extra_kernel_map(struct machine *machine, ...@@ -1185,10 +1194,11 @@ int machine__create_extra_kernel_map(struct machine *machine,
{ {
struct kmap *kmap; struct kmap *kmap;
struct map *map; struct map *map;
int err;
map = map__new2(xm->start, kernel); map = map__new2(xm->start, kernel);
if (!map) if (!map)
return -1; return -ENOMEM;
map->end = xm->end; map->end = xm->end;
map->pgoff = xm->pgoff; map->pgoff = xm->pgoff;
...@@ -1197,14 +1207,16 @@ int machine__create_extra_kernel_map(struct machine *machine, ...@@ -1197,14 +1207,16 @@ int machine__create_extra_kernel_map(struct machine *machine,
strlcpy(kmap->name, xm->name, KMAP_NAME_LEN); strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
maps__insert(machine__kernel_maps(machine), map); err = maps__insert(machine__kernel_maps(machine), map);
if (!err) {
pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n", pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
kmap->name, map->start, map->end); kmap->name, map->start, map->end);
}
map__put(map); map__put(map);
return 0; return err;
} }
static u64 find_entry_trampoline(struct dso *dso) static u64 find_entry_trampoline(struct dso *dso)
...@@ -1245,16 +1257,16 @@ int machine__map_x86_64_entry_trampolines(struct machine *machine, ...@@ -1245,16 +1257,16 @@ int machine__map_x86_64_entry_trampolines(struct machine *machine,
struct maps *kmaps = machine__kernel_maps(machine); struct maps *kmaps = machine__kernel_maps(machine);
int nr_cpus_avail, cpu; int nr_cpus_avail, cpu;
bool found = false; bool found = false;
struct map *map; struct map_rb_node *rb_node;
u64 pgoff; u64 pgoff;
/* /*
* In the vmlinux case, pgoff is a virtual address which must now be * In the vmlinux case, pgoff is a virtual address which must now be
* mapped to a vmlinux offset. * mapped to a vmlinux offset.
*/ */
maps__for_each_entry(kmaps, map) { maps__for_each_entry(kmaps, rb_node) {
struct map *dest_map, *map = rb_node->map;
struct kmap *kmap = __map__kmap(map); struct kmap *kmap = __map__kmap(map);
struct map *dest_map;
if (!kmap || !is_entry_trampoline(kmap->name)) if (!kmap || !is_entry_trampoline(kmap->name))
continue; continue;
...@@ -1309,11 +1321,10 @@ __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) ...@@ -1309,11 +1321,10 @@ __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
machine->vmlinux_map = map__new2(0, kernel); machine->vmlinux_map = map__new2(0, kernel);
if (machine->vmlinux_map == NULL) if (machine->vmlinux_map == NULL)
return -1; return -ENOMEM;
machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip; machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
maps__insert(machine__kernel_maps(machine), machine->vmlinux_map); return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
return 0;
} }
void machine__destroy_kernel_maps(struct machine *machine) void machine__destroy_kernel_maps(struct machine *machine)
...@@ -1635,25 +1646,26 @@ static void machine__set_kernel_mmap(struct machine *machine, ...@@ -1635,25 +1646,26 @@ static void machine__set_kernel_mmap(struct machine *machine,
machine->vmlinux_map->end = ~0ULL; machine->vmlinux_map->end = ~0ULL;
} }
static void machine__update_kernel_mmap(struct machine *machine, static int machine__update_kernel_mmap(struct machine *machine,
u64 start, u64 end) u64 start, u64 end)
{ {
struct map *map = machine__kernel_map(machine); struct map *map = machine__kernel_map(machine);
int err;
map__get(map); map__get(map);
maps__remove(machine__kernel_maps(machine), map); maps__remove(machine__kernel_maps(machine), map);
machine__set_kernel_mmap(machine, start, end); machine__set_kernel_mmap(machine, start, end);
maps__insert(machine__kernel_maps(machine), map); err = maps__insert(machine__kernel_maps(machine), map);
map__put(map); map__put(map);
return err;
} }
int machine__create_kernel_maps(struct machine *machine) int machine__create_kernel_maps(struct machine *machine)
{ {
struct dso *kernel = machine__get_kernel(machine); struct dso *kernel = machine__get_kernel(machine);
const char *name = NULL; const char *name = NULL;
struct map *map;
u64 start = 0, end = ~0ULL; u64 start = 0, end = ~0ULL;
int ret; int ret;
...@@ -1685,7 +1697,9 @@ int machine__create_kernel_maps(struct machine *machine) ...@@ -1685,7 +1697,9 @@ int machine__create_kernel_maps(struct machine *machine)
* we have a real start address now, so re-order the kmaps * we have a real start address now, so re-order the kmaps
* assume it's the last in the kmaps * assume it's the last in the kmaps
*/ */
machine__update_kernel_mmap(machine, start, end); ret = machine__update_kernel_mmap(machine, start, end);
if (ret < 0)
goto out_put;
} }
if (machine__create_extra_kernel_maps(machine, kernel)) if (machine__create_extra_kernel_maps(machine, kernel))
...@@ -1693,9 +1707,12 @@ int machine__create_kernel_maps(struct machine *machine) ...@@ -1693,9 +1707,12 @@ int machine__create_kernel_maps(struct machine *machine)
if (end == ~0ULL) { if (end == ~0ULL) {
/* update end address of the kernel map using adjacent module address */ /* update end address of the kernel map using adjacent module address */
map = map__next(machine__kernel_map(machine)); struct map_rb_node *rb_node = maps__find_node(machine__kernel_maps(machine),
if (map) machine__kernel_map(machine));
machine__set_kernel_mmap(machine, start, map->start); struct map_rb_node *next = map_rb_node__next(rb_node);
if (next)
machine__set_kernel_mmap(machine, start, next->map->start);
} }
out_put: out_put:
...@@ -1828,7 +1845,10 @@ static int machine__process_kernel_mmap_event(struct machine *machine, ...@@ -1828,7 +1845,10 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
if (strstr(kernel->long_name, "vmlinux")) if (strstr(kernel->long_name, "vmlinux"))
dso__set_short_name(kernel, "[kernel.vmlinux]", false); dso__set_short_name(kernel, "[kernel.vmlinux]", false);
machine__update_kernel_mmap(machine, xm->start, xm->end); if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
dso__put(kernel);
goto out_problem;
}
if (build_id__is_defined(bid)) if (build_id__is_defined(bid))
dso__set_build_id(kernel, bid); dso__set_build_id(kernel, bid);
...@@ -3330,11 +3350,11 @@ int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv ...@@ -3330,11 +3350,11 @@ int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv
int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv) int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
{ {
struct maps *maps = machine__kernel_maps(machine); struct maps *maps = machine__kernel_maps(machine);
struct map *map; struct map_rb_node *pos;
int err = 0; int err = 0;
for (map = maps__first(maps); map != NULL; map = map__next(map)) { maps__for_each_entry(maps, pos) {
err = fn(map, priv); err = fn(pos->map, priv);
if (err != 0) { if (err != 0) {
break; break;
} }
......
...@@ -111,7 +111,6 @@ void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso) ...@@ -111,7 +111,6 @@ void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
map->dso = dso__get(dso); map->dso = dso__get(dso);
map->map_ip = map__map_ip; map->map_ip = map__map_ip;
map->unmap_ip = map__unmap_ip; map->unmap_ip = map__unmap_ip;
RB_CLEAR_NODE(&map->rb_node);
map->erange_warned = false; map->erange_warned = false;
refcount_set(&map->refcnt, 1); refcount_set(&map->refcnt, 1);
} }
...@@ -397,7 +396,6 @@ struct map *map__clone(struct map *from) ...@@ -397,7 +396,6 @@ struct map *map__clone(struct map *from)
map = memdup(from, size); map = memdup(from, size);
if (map != NULL) { if (map != NULL) {
refcount_set(&map->refcnt, 1); refcount_set(&map->refcnt, 1);
RB_CLEAR_NODE(&map->rb_node);
dso__get(map->dso); dso__get(map->dso);
} }
...@@ -537,20 +535,6 @@ bool map__contains_symbol(const struct map *map, const struct symbol *sym) ...@@ -537,20 +535,6 @@ bool map__contains_symbol(const struct map *map, const struct symbol *sym)
return ip >= map->start && ip < map->end; return ip >= map->start && ip < map->end;
} }
static struct map *__map__next(struct map *map)
{
struct rb_node *next = rb_next(&map->rb_node);
if (next)
return rb_entry(next, struct map, rb_node);
return NULL;
}
struct map *map__next(struct map *map)
{
return map ? __map__next(map) : NULL;
}
struct kmap *__map__kmap(struct map *map) struct kmap *__map__kmap(struct map *map)
{ {
if (!map->dso || !map->dso->kernel) if (!map->dso || !map->dso->kernel)
......
...@@ -16,7 +16,6 @@ struct maps; ...@@ -16,7 +16,6 @@ struct maps;
struct machine; struct machine;
struct map { struct map {
struct rb_node rb_node;
u64 start; u64 start;
u64 end; u64 end;
bool erange_warned:1; bool erange_warned:1;
......
...@@ -10,8 +10,6 @@ ...@@ -10,8 +10,6 @@
#include "ui/ui.h" #include "ui/ui.h"
#include "unwind.h" #include "unwind.h"
static void __maps__insert(struct maps *maps, struct map *map);
static void maps__init(struct maps *maps, struct machine *machine) static void maps__init(struct maps *maps, struct machine *machine)
{ {
maps->entries = RB_ROOT; maps->entries = RB_ROOT;
...@@ -32,10 +30,44 @@ static void __maps__free_maps_by_name(struct maps *maps) ...@@ -32,10 +30,44 @@ static void __maps__free_maps_by_name(struct maps *maps)
maps->nr_maps_allocated = 0; maps->nr_maps_allocated = 0;
} }
void maps__insert(struct maps *maps, struct map *map) static int __maps__insert(struct maps *maps, struct map *map)
{
struct rb_node **p = &maps->entries.rb_node;
struct rb_node *parent = NULL;
const u64 ip = map->start;
struct map_rb_node *m, *new_rb_node;
new_rb_node = malloc(sizeof(*new_rb_node));
if (!new_rb_node)
return -ENOMEM;
RB_CLEAR_NODE(&new_rb_node->rb_node);
new_rb_node->map = map;
while (*p != NULL) {
parent = *p;
m = rb_entry(parent, struct map_rb_node, rb_node);
if (ip < m->map->start)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&new_rb_node->rb_node, parent, p);
rb_insert_color(&new_rb_node->rb_node, &maps->entries);
map__get(map);
return 0;
}
int maps__insert(struct maps *maps, struct map *map)
{ {
int err;
down_write(&maps->lock); down_write(&maps->lock);
__maps__insert(maps, map); err = __maps__insert(maps, map);
if (err)
goto out;
++maps->nr_maps; ++maps->nr_maps;
if (map->dso && map->dso->kernel) { if (map->dso && map->dso->kernel) {
...@@ -59,32 +91,39 @@ void maps__insert(struct maps *maps, struct map *map) ...@@ -59,32 +91,39 @@ void maps__insert(struct maps *maps, struct map *map)
if (maps_by_name == NULL) { if (maps_by_name == NULL) {
__maps__free_maps_by_name(maps); __maps__free_maps_by_name(maps);
up_write(&maps->lock); err = -ENOMEM;
return; goto out;
} }
maps->maps_by_name = maps_by_name; maps->maps_by_name = maps_by_name;
maps->nr_maps_allocated = nr_allocate; maps->nr_maps_allocated = nr_allocate;
} }
maps->maps_by_name[maps->nr_maps - 1] = map; maps->maps_by_name[maps->nr_maps - 1] = map;
__maps__sort_by_name(maps); __maps__sort_by_name(maps);
} }
out:
up_write(&maps->lock); up_write(&maps->lock);
return err;
} }
static void __maps__remove(struct maps *maps, struct map *map) static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
{ {
rb_erase_init(&map->rb_node, &maps->entries); rb_erase_init(&rb_node->rb_node, &maps->entries);
map__put(map); map__put(rb_node->map);
free(rb_node);
} }
void maps__remove(struct maps *maps, struct map *map) void maps__remove(struct maps *maps, struct map *map)
{ {
struct map_rb_node *rb_node;
down_write(&maps->lock); down_write(&maps->lock);
if (maps->last_search_by_name == map) if (maps->last_search_by_name == map)
maps->last_search_by_name = NULL; maps->last_search_by_name = NULL;
__maps__remove(maps, map); rb_node = maps__find_node(maps, map);
assert(rb_node->map == map);
__maps__remove(maps, rb_node);
--maps->nr_maps; --maps->nr_maps;
if (maps->maps_by_name) if (maps->maps_by_name)
__maps__free_maps_by_name(maps); __maps__free_maps_by_name(maps);
...@@ -93,11 +132,12 @@ void maps__remove(struct maps *maps, struct map *map) ...@@ -93,11 +132,12 @@ void maps__remove(struct maps *maps, struct map *map)
static void __maps__purge(struct maps *maps) static void __maps__purge(struct maps *maps)
{ {
struct map *pos, *next; struct map_rb_node *pos, *next;
maps__for_each_entry_safe(maps, pos, next) { maps__for_each_entry_safe(maps, pos, next) {
rb_erase_init(&pos->rb_node, &maps->entries); rb_erase_init(&pos->rb_node, &maps->entries);
map__put(pos); map__put(pos->map);
free(pos);
} }
} }
...@@ -153,21 +193,21 @@ struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp) ...@@ -153,21 +193,21 @@ struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp) struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{ {
struct symbol *sym; struct symbol *sym;
struct map *pos; struct map_rb_node *pos;
down_read(&maps->lock); down_read(&maps->lock);
maps__for_each_entry(maps, pos) { maps__for_each_entry(maps, pos) {
sym = map__find_symbol_by_name(pos, name); sym = map__find_symbol_by_name(pos->map, name);
if (sym == NULL) if (sym == NULL)
continue; continue;
if (!map__contains_symbol(pos, sym)) { if (!map__contains_symbol(pos->map, sym)) {
sym = NULL; sym = NULL;
continue; continue;
} }
if (mapp != NULL) if (mapp != NULL)
*mapp = pos; *mapp = pos->map;
goto out; goto out;
} }
...@@ -196,15 +236,15 @@ int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams) ...@@ -196,15 +236,15 @@ int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
size_t maps__fprintf(struct maps *maps, FILE *fp) size_t maps__fprintf(struct maps *maps, FILE *fp)
{ {
size_t printed = 0; size_t printed = 0;
struct map *pos; struct map_rb_node *pos;
down_read(&maps->lock); down_read(&maps->lock);
maps__for_each_entry(maps, pos) { maps__for_each_entry(maps, pos) {
printed += fprintf(fp, "Map:"); printed += fprintf(fp, "Map:");
printed += map__fprintf(pos, fp); printed += map__fprintf(pos->map, fp);
if (verbose > 2) { if (verbose > 2) {
printed += dso__fprintf(pos->dso, fp); printed += dso__fprintf(pos->map->dso, fp);
printed += fprintf(fp, "--\n"); printed += fprintf(fp, "--\n");
} }
} }
...@@ -231,11 +271,11 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) ...@@ -231,11 +271,11 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
next = root->rb_node; next = root->rb_node;
first = NULL; first = NULL;
while (next) { while (next) {
struct map *pos = rb_entry(next, struct map, rb_node); struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
if (pos->end > map->start) { if (pos->map->end > map->start) {
first = next; first = next;
if (pos->start <= map->start) if (pos->map->start <= map->start)
break; break;
next = next->rb_left; next = next->rb_left;
} else } else
...@@ -244,14 +284,14 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) ...@@ -244,14 +284,14 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
next = first; next = first;
while (next) { while (next) {
struct map *pos = rb_entry(next, struct map, rb_node); struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
next = rb_next(&pos->rb_node); next = rb_next(&pos->rb_node);
/* /*
* Stop if current map starts after map->end. * Stop if current map starts after map->end.
* Maps are ordered by start: next will not overlap for sure. * Maps are ordered by start: next will not overlap for sure.
*/ */
if (pos->start >= map->end) if (pos->map->start >= map->end)
break; break;
if (verbose >= 2) { if (verbose >= 2) {
...@@ -262,7 +302,7 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) ...@@ -262,7 +302,7 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
} else { } else {
fputs("overlapping maps:\n", fp); fputs("overlapping maps:\n", fp);
map__fprintf(map, fp); map__fprintf(map, fp);
map__fprintf(pos, fp); map__fprintf(pos->map, fp);
} }
} }
...@@ -271,8 +311,8 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) ...@@ -271,8 +311,8 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
* Now check if we need to create new maps for areas not * Now check if we need to create new maps for areas not
* overlapped by the new map: * overlapped by the new map:
*/ */
if (map->start > pos->start) { if (map->start > pos->map->start) {
struct map *before = map__clone(pos); struct map *before = map__clone(pos->map);
if (before == NULL) { if (before == NULL) {
err = -ENOMEM; err = -ENOMEM;
...@@ -280,14 +320,17 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) ...@@ -280,14 +320,17 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
} }
before->end = map->start; before->end = map->start;
__maps__insert(maps, before); err = __maps__insert(maps, before);
if (err)
goto put_map;
if (verbose >= 2 && !use_browser) if (verbose >= 2 && !use_browser)
map__fprintf(before, fp); map__fprintf(before, fp);
map__put(before); map__put(before);
} }
if (map->end < pos->end) { if (map->end < pos->map->end) {
struct map *after = map__clone(pos); struct map *after = map__clone(pos->map);
if (after == NULL) { if (after == NULL) {
err = -ENOMEM; err = -ENOMEM;
...@@ -295,15 +338,19 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) ...@@ -295,15 +338,19 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
} }
after->start = map->end; after->start = map->end;
after->pgoff += map->end - pos->start; after->pgoff += map->end - pos->map->start;
assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end)); assert(pos->map->map_ip(pos->map, map->end) ==
__maps__insert(maps, after); after->map_ip(after, map->end));
err = __maps__insert(maps, after);
if (err)
goto put_map;
if (verbose >= 2 && !use_browser) if (verbose >= 2 && !use_browser)
map__fprintf(after, fp); map__fprintf(after, fp);
map__put(after); map__put(after);
} }
put_map: put_map:
map__put(pos); map__put(pos->map);
if (err) if (err)
goto out; goto out;
...@@ -322,12 +369,12 @@ int maps__clone(struct thread *thread, struct maps *parent) ...@@ -322,12 +369,12 @@ int maps__clone(struct thread *thread, struct maps *parent)
{ {
struct maps *maps = thread->maps; struct maps *maps = thread->maps;
int err; int err;
struct map *map; struct map_rb_node *rb_node;
down_read(&parent->lock); down_read(&parent->lock);
maps__for_each_entry(parent, map) { maps__for_each_entry(parent, rb_node) {
struct map *new = map__clone(map); struct map *new = map__clone(rb_node->map);
if (new == NULL) { if (new == NULL) {
err = -ENOMEM; err = -ENOMEM;
...@@ -338,7 +385,10 @@ int maps__clone(struct thread *thread, struct maps *parent) ...@@ -338,7 +385,10 @@ int maps__clone(struct thread *thread, struct maps *parent)
if (err) if (err)
goto out_unlock; goto out_unlock;
maps__insert(maps, new); err = maps__insert(maps, new);
if (err)
goto out_unlock;
map__put(new); map__put(new);
} }
...@@ -348,40 +398,31 @@ int maps__clone(struct thread *thread, struct maps *parent) ...@@ -348,40 +398,31 @@ int maps__clone(struct thread *thread, struct maps *parent)
return err; return err;
} }
static void __maps__insert(struct maps *maps, struct map *map) struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
{ {
struct rb_node **p = &maps->entries.rb_node; struct map_rb_node *rb_node;
struct rb_node *parent = NULL;
const u64 ip = map->start;
struct map *m;
while (*p != NULL) { maps__for_each_entry(maps, rb_node) {
parent = *p; if (rb_node->map == map)
m = rb_entry(parent, struct map, rb_node); return rb_node;
if (ip < m->start)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
} }
return NULL;
rb_link_node(&map->rb_node, parent, p);
rb_insert_color(&map->rb_node, &maps->entries);
map__get(map);
} }
struct map *maps__find(struct maps *maps, u64 ip) struct map *maps__find(struct maps *maps, u64 ip)
{ {
struct rb_node *p; struct rb_node *p;
struct map *m; struct map_rb_node *m;
down_read(&maps->lock); down_read(&maps->lock);
p = maps->entries.rb_node; p = maps->entries.rb_node;
while (p != NULL) { while (p != NULL) {
m = rb_entry(p, struct map, rb_node); m = rb_entry(p, struct map_rb_node, rb_node);
if (ip < m->start) if (ip < m->map->start)
p = p->rb_left; p = p->rb_left;
else if (ip >= m->end) else if (ip >= m->map->end)
p = p->rb_right; p = p->rb_right;
else else
goto out; goto out;
...@@ -390,14 +431,29 @@ struct map *maps__find(struct maps *maps, u64 ip) ...@@ -390,14 +431,29 @@ struct map *maps__find(struct maps *maps, u64 ip)
m = NULL; m = NULL;
out: out:
up_read(&maps->lock); up_read(&maps->lock);
return m; return m ? m->map : NULL;
} }
struct map *maps__first(struct maps *maps) struct map_rb_node *maps__first(struct maps *maps)
{ {
struct rb_node *first = rb_first(&maps->entries); struct rb_node *first = rb_first(&maps->entries);
if (first) if (first)
return rb_entry(first, struct map, rb_node); return rb_entry(first, struct map_rb_node, rb_node);
return NULL; return NULL;
} }
struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
{
struct rb_node *next;
if (!node)
return NULL;
next = rb_next(&node->rb_node);
if (!next)
return NULL;
return rb_entry(next, struct map_rb_node, rb_node);
}
...@@ -15,15 +15,22 @@ struct map; ...@@ -15,15 +15,22 @@ struct map;
struct maps; struct maps;
struct thread; struct thread;
struct map_rb_node {
struct rb_node rb_node;
struct map *map;
};
struct map_rb_node *maps__first(struct maps *maps);
struct map_rb_node *map_rb_node__next(struct map_rb_node *node);
struct map_rb_node *maps__find_node(struct maps *maps, struct map *map);
struct map *maps__find(struct maps *maps, u64 addr); struct map *maps__find(struct maps *maps, u64 addr);
struct map *maps__first(struct maps *maps);
struct map *map__next(struct map *map);
#define maps__for_each_entry(maps, map) \ #define maps__for_each_entry(maps, map) \
for (map = maps__first(maps); map; map = map__next(map)) for (map = maps__first(maps); map; map = map_rb_node__next(map))
#define maps__for_each_entry_safe(maps, map, next) \ #define maps__for_each_entry_safe(maps, map, next) \
for (map = maps__first(maps), next = map__next(map); map; map = next, next = map__next(map)) for (map = maps__first(maps), next = map_rb_node__next(map); map; \
map = next, next = map_rb_node__next(map))
struct maps { struct maps {
struct rb_root entries; struct rb_root entries;
...@@ -63,7 +70,7 @@ void maps__put(struct maps *maps); ...@@ -63,7 +70,7 @@ void maps__put(struct maps *maps);
int maps__clone(struct thread *thread, struct maps *parent); int maps__clone(struct thread *thread, struct maps *parent);
size_t maps__fprintf(struct maps *maps, FILE *fp); size_t maps__fprintf(struct maps *maps, FILE *fp);
void maps__insert(struct maps *maps, struct map *map); int maps__insert(struct maps *maps, struct map *map);
void maps__remove(struct maps *maps, struct map *map); void maps__remove(struct maps *maps, struct map *map);
......
...@@ -151,23 +151,27 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, ...@@ -151,23 +151,27 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
static struct map *kernel_get_module_map(const char *module) static struct map *kernel_get_module_map(const char *module)
{ {
struct maps *maps = machine__kernel_maps(host_machine); struct maps *maps = machine__kernel_maps(host_machine);
struct map *pos; struct map_rb_node *pos;
/* A file path -- this is an offline module */ /* A file path -- this is an offline module */
if (module && strchr(module, '/')) if (module && strchr(module, '/'))
return dso__new_map(module); return dso__new_map(module);
if (!module) { if (!module) {
pos = machine__kernel_map(host_machine); struct map *map = machine__kernel_map(host_machine);
return map__get(pos);
return map__get(map);
} }
maps__for_each_entry(maps, pos) { maps__for_each_entry(maps, pos) {
/* short_name is "[module]" */ /* short_name is "[module]" */
if (strncmp(pos->dso->short_name + 1, module, const char *short_name = pos->map->dso->short_name;
pos->dso->short_name_len - 2) == 0 && u16 short_name_len = pos->map->dso->short_name_len;
module[pos->dso->short_name_len - 2] == '\0') {
return map__get(pos); if (strncmp(short_name + 1, module,
short_name_len - 2) == 0 &&
module[short_name_len - 2] == '\0') {
return map__get(pos->map);
} }
} }
return NULL; return NULL;
......
...@@ -1361,10 +1361,14 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, ...@@ -1361,10 +1361,14 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
map->unmap_ip = map__unmap_ip; map->unmap_ip = map__unmap_ip;
/* Ensure maps are correctly ordered */ /* Ensure maps are correctly ordered */
if (kmaps) { if (kmaps) {
int err;
map__get(map); map__get(map);
maps__remove(kmaps, map); maps__remove(kmaps, map);
maps__insert(kmaps, map); err = maps__insert(kmaps, map);
map__put(map); map__put(map);
if (err)
return err;
} }
} }
...@@ -1417,7 +1421,8 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, ...@@ -1417,7 +1421,8 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
} }
curr_dso->symtab_type = dso->symtab_type; curr_dso->symtab_type = dso->symtab_type;
maps__insert(kmaps, curr_map); if (maps__insert(kmaps, curr_map))
return -1;
/* /*
* Add it before we drop the reference to curr_map, i.e. while * Add it before we drop the reference to curr_map, i.e. while
* we still are sure to have a reference to this DSO via * we still are sure to have a reference to this DSO via
......
...@@ -273,13 +273,13 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms) ...@@ -273,13 +273,13 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
void maps__fixup_end(struct maps *maps) void maps__fixup_end(struct maps *maps)
{ {
struct map *prev = NULL, *curr; struct map_rb_node *prev = NULL, *curr;
down_write(&maps->lock); down_write(&maps->lock);
maps__for_each_entry(maps, curr) { maps__for_each_entry(maps, curr) {
if (prev != NULL && !prev->end) if (prev != NULL && !prev->map->end)
prev->end = curr->start; prev->map->end = curr->map->start;
prev = curr; prev = curr;
} }
...@@ -288,8 +288,8 @@ void maps__fixup_end(struct maps *maps) ...@@ -288,8 +288,8 @@ void maps__fixup_end(struct maps *maps)
* We still haven't the actual symbols, so guess the * We still haven't the actual symbols, so guess the
* last map final address. * last map final address.
*/ */
if (curr && !curr->end) if (curr && !curr->map->end)
curr->end = ~0ULL; curr->map->end = ~0ULL;
up_write(&maps->lock); up_write(&maps->lock);
} }
...@@ -942,7 +942,10 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, ...@@ -942,7 +942,10 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
} }
curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
maps__insert(kmaps, curr_map); if (maps__insert(kmaps, curr_map)) {
dso__put(ndso);
return -1;
}
++kernel_range; ++kernel_range;
} else if (delta) { } else if (delta) {
/* Kernel was relocated at boot time */ /* Kernel was relocated at boot time */
...@@ -1130,14 +1133,15 @@ int compare_proc_modules(const char *from, const char *to) ...@@ -1130,14 +1133,15 @@ int compare_proc_modules(const char *from, const char *to)
static int do_validate_kcore_modules(const char *filename, struct maps *kmaps) static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
{ {
struct rb_root modules = RB_ROOT; struct rb_root modules = RB_ROOT;
struct map *old_map; struct map_rb_node *old_node;
int err; int err;
err = read_proc_modules(filename, &modules); err = read_proc_modules(filename, &modules);
if (err) if (err)
return err; return err;
maps__for_each_entry(kmaps, old_map) { maps__for_each_entry(kmaps, old_node) {
struct map *old_map = old_node->map;
struct module_info *mi; struct module_info *mi;
if (!__map__is_kmodule(old_map)) { if (!__map__is_kmodule(old_map)) {
...@@ -1254,10 +1258,13 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) ...@@ -1254,10 +1258,13 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
*/ */
int maps__merge_in(struct maps *kmaps, struct map *new_map) int maps__merge_in(struct maps *kmaps, struct map *new_map)
{ {
struct map *old_map; struct map_rb_node *rb_node;
LIST_HEAD(merged); LIST_HEAD(merged);
int err = 0;
maps__for_each_entry(kmaps, rb_node) {
struct map *old_map = rb_node->map;
maps__for_each_entry(kmaps, old_map) {
/* no overload with this one */ /* no overload with this one */
if (new_map->end < old_map->start || if (new_map->end < old_map->start ||
new_map->start >= old_map->end) new_map->start >= old_map->end)
...@@ -1281,13 +1288,16 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map) ...@@ -1281,13 +1288,16 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
*/ */
struct map_list_node *m = map_list_node__new(); struct map_list_node *m = map_list_node__new();
if (!m) if (!m) {
return -ENOMEM; err = -ENOMEM;
goto out;
}
m->map = map__clone(new_map); m->map = map__clone(new_map);
if (!m->map) { if (!m->map) {
free(m); free(m);
return -ENOMEM; err = -ENOMEM;
goto out;
} }
m->map->end = old_map->start; m->map->end = old_map->start;
...@@ -1319,21 +1329,24 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map) ...@@ -1319,21 +1329,24 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
} }
} }
out:
while (!list_empty(&merged)) { while (!list_empty(&merged)) {
struct map_list_node *old_node; struct map_list_node *old_node;
old_node = list_entry(merged.next, struct map_list_node, node); old_node = list_entry(merged.next, struct map_list_node, node);
list_del_init(&old_node->node); list_del_init(&old_node->node);
maps__insert(kmaps, old_node->map); if (!err)
err = maps__insert(kmaps, old_node->map);
map__put(old_node->map); map__put(old_node->map);
free(old_node); free(old_node);
} }
if (new_map) { if (new_map) {
maps__insert(kmaps, new_map); if (!err)
err = maps__insert(kmaps, new_map);
map__put(new_map); map__put(new_map);
} }
return 0; return err;
} }
static int dso__load_kcore(struct dso *dso, struct map *map, static int dso__load_kcore(struct dso *dso, struct map *map,
...@@ -1341,7 +1354,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map, ...@@ -1341,7 +1354,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
{ {
struct maps *kmaps = map__kmaps(map); struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md; struct kcore_mapfn_data md;
struct map *old_map, *replacement_map = NULL, *next; struct map *replacement_map = NULL;
struct map_rb_node *old_node, *next;
struct machine *machine; struct machine *machine;
bool is_64_bit; bool is_64_bit;
int err, fd; int err, fd;
...@@ -1388,7 +1402,9 @@ static int dso__load_kcore(struct dso *dso, struct map *map, ...@@ -1388,7 +1402,9 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
} }
/* Remove old maps */ /* Remove old maps */
maps__for_each_entry_safe(kmaps, old_map, next) { maps__for_each_entry_safe(kmaps, old_node, next) {
struct map *old_map = old_node->map;
/* /*
* We need to preserve eBPF maps even if they are * We need to preserve eBPF maps even if they are
* covered by kcore, because we need to access * covered by kcore, because we need to access
...@@ -1443,18 +1459,22 @@ static int dso__load_kcore(struct dso *dso, struct map *map, ...@@ -1443,18 +1459,22 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
/* Ensure maps are correctly ordered */ /* Ensure maps are correctly ordered */
map__get(map); map__get(map);
maps__remove(kmaps, map); maps__remove(kmaps, map);
maps__insert(kmaps, map); err = maps__insert(kmaps, map);
map__put(map); map__put(map);
map__put(new_map); map__put(new_map);
if (err)
goto out_err;
} else { } else {
/* /*
* Merge kcore map into existing maps, * Merge kcore map into existing maps,
* and ensure that current maps (eBPF) * and ensure that current maps (eBPF)
* stay intact. * stay intact.
*/ */
if (maps__merge_in(kmaps, new_map)) if (maps__merge_in(kmaps, new_map)) {
err = -EINVAL;
goto out_err; goto out_err;
} }
}
free(new_node); free(new_node);
} }
...@@ -1500,7 +1520,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, ...@@ -1500,7 +1520,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
free(list_node); free(list_node);
} }
close(fd); close(fd);
return -EINVAL; return err;
} }
/* /*
...@@ -2044,8 +2064,9 @@ void __maps__sort_by_name(struct maps *maps) ...@@ -2044,8 +2064,9 @@ void __maps__sort_by_name(struct maps *maps)
static int map__groups__sort_by_name_from_rbtree(struct maps *maps) static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
{ {
struct map *map; struct map_rb_node *rb_node;
struct map **maps_by_name = realloc(maps->maps_by_name, maps->nr_maps * sizeof(map)); struct map **maps_by_name = realloc(maps->maps_by_name,
maps->nr_maps * sizeof(struct map *));
int i = 0; int i = 0;
if (maps_by_name == NULL) if (maps_by_name == NULL)
...@@ -2057,8 +2078,8 @@ static int map__groups__sort_by_name_from_rbtree(struct maps *maps) ...@@ -2057,8 +2078,8 @@ static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
maps->maps_by_name = maps_by_name; maps->maps_by_name = maps_by_name;
maps->nr_maps_allocated = maps->nr_maps; maps->nr_maps_allocated = maps->nr_maps;
maps__for_each_entry(maps, map) maps__for_each_entry(maps, rb_node)
maps_by_name[i++] = map; maps_by_name[i++] = rb_node->map;
__maps__sort_by_name(maps); __maps__sort_by_name(maps);
...@@ -2084,6 +2105,7 @@ static struct map *__maps__find_by_name(struct maps *maps, const char *name) ...@@ -2084,6 +2105,7 @@ static struct map *__maps__find_by_name(struct maps *maps, const char *name)
struct map *maps__find_by_name(struct maps *maps, const char *name) struct map *maps__find_by_name(struct maps *maps, const char *name)
{ {
struct map_rb_node *rb_node;
struct map *map; struct map *map;
down_read(&maps->lock); down_read(&maps->lock);
...@@ -2102,12 +2124,13 @@ struct map *maps__find_by_name(struct maps *maps, const char *name) ...@@ -2102,12 +2124,13 @@ struct map *maps__find_by_name(struct maps *maps, const char *name)
goto out_unlock; goto out_unlock;
/* Fallback to traversing the rbtree... */ /* Fallback to traversing the rbtree... */
maps__for_each_entry(maps, map) maps__for_each_entry(maps, rb_node) {
map = rb_node->map;
if (strcmp(map->dso->short_name, name) == 0) { if (strcmp(map->dso->short_name, name) == 0) {
maps->last_search_by_name = map; maps->last_search_by_name = map;
goto out_unlock; goto out_unlock;
} }
}
map = NULL; map = NULL;
out_unlock: out_unlock:
......
...@@ -669,7 +669,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t ...@@ -669,7 +669,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
struct machine *machine) struct machine *machine)
{ {
int rc = 0; int rc = 0;
struct map *pos; struct map_rb_node *pos;
struct maps *maps = machine__kernel_maps(machine); struct maps *maps = machine__kernel_maps(machine);
union perf_event *event; union perf_event *event;
size_t size = symbol_conf.buildid_mmap2 ? size_t size = symbol_conf.buildid_mmap2 ?
...@@ -692,37 +692,39 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t ...@@ -692,37 +692,39 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
maps__for_each_entry(maps, pos) { maps__for_each_entry(maps, pos) {
if (!__map__is_kmodule(pos)) struct map *map = pos->map;
if (!__map__is_kmodule(map))
continue; continue;
if (symbol_conf.buildid_mmap2) { if (symbol_conf.buildid_mmap2) {
size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); size = PERF_ALIGN(map->dso->long_name_len + 1, sizeof(u64));
event->mmap2.header.type = PERF_RECORD_MMAP2; event->mmap2.header.type = PERF_RECORD_MMAP2;
event->mmap2.header.size = (sizeof(event->mmap2) - event->mmap2.header.size = (sizeof(event->mmap2) -
(sizeof(event->mmap2.filename) - size)); (sizeof(event->mmap2.filename) - size));
memset(event->mmap2.filename + size, 0, machine->id_hdr_size); memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
event->mmap2.header.size += machine->id_hdr_size; event->mmap2.header.size += machine->id_hdr_size;
event->mmap2.start = pos->start; event->mmap2.start = map->start;
event->mmap2.len = pos->end - pos->start; event->mmap2.len = map->end - map->start;
event->mmap2.pid = machine->pid; event->mmap2.pid = machine->pid;
memcpy(event->mmap2.filename, pos->dso->long_name, memcpy(event->mmap2.filename, map->dso->long_name,
pos->dso->long_name_len + 1); map->dso->long_name_len + 1);
perf_record_mmap2__read_build_id(&event->mmap2, machine, false); perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
} else { } else {
size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); size = PERF_ALIGN(map->dso->long_name_len + 1, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP; event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) - event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size)); (sizeof(event->mmap.filename) - size));
memset(event->mmap.filename + size, 0, machine->id_hdr_size); memset(event->mmap.filename + size, 0, machine->id_hdr_size);
event->mmap.header.size += machine->id_hdr_size; event->mmap.header.size += machine->id_hdr_size;
event->mmap.start = pos->start; event->mmap.start = map->start;
event->mmap.len = pos->end - pos->start; event->mmap.len = map->end - map->start;
event->mmap.pid = machine->pid; event->mmap.pid = machine->pid;
memcpy(event->mmap.filename, pos->dso->long_name, memcpy(event->mmap.filename, map->dso->long_name,
pos->dso->long_name_len + 1); map->dso->long_name_len + 1);
} }
if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
......
...@@ -352,9 +352,7 @@ int thread__insert_map(struct thread *thread, struct map *map) ...@@ -352,9 +352,7 @@ int thread__insert_map(struct thread *thread, struct map *map)
return ret; return ret;
maps__fixup_overlappings(thread->maps, map, stderr); maps__fixup_overlappings(thread->maps, map, stderr);
maps__insert(thread->maps, map); return maps__insert(thread->maps, map);
return 0;
} }
static int __thread__prepare_access(struct thread *thread) static int __thread__prepare_access(struct thread *thread)
...@@ -362,12 +360,12 @@ static int __thread__prepare_access(struct thread *thread) ...@@ -362,12 +360,12 @@ static int __thread__prepare_access(struct thread *thread)
bool initialized = false; bool initialized = false;
int err = 0; int err = 0;
struct maps *maps = thread->maps; struct maps *maps = thread->maps;
struct map *map; struct map_rb_node *rb_node;
down_read(&maps->lock); down_read(&maps->lock);
maps__for_each_entry(maps, map) { maps__for_each_entry(maps, rb_node) {
err = unwind__prepare_access(thread->maps, map, &initialized); err = unwind__prepare_access(thread->maps, rb_node->map, &initialized);
if (err || initialized) if (err || initialized)
break; break;
} }
......
...@@ -306,7 +306,7 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui, ...@@ -306,7 +306,7 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
u64 *table_data, u64 *segbase, u64 *table_data, u64 *segbase,
u64 *fde_count) u64 *fde_count)
{ {
struct map *map; struct map_rb_node *map_node;
u64 base_addr = UINT64_MAX; u64 base_addr = UINT64_MAX;
int ret, fd; int ret, fd;
...@@ -325,7 +325,9 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui, ...@@ -325,7 +325,9 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
return -EINVAL; return -EINVAL;
} }
maps__for_each_entry(ui->thread->maps, map) { maps__for_each_entry(ui->thread->maps, map_node) {
struct map *map = map_node->map;
if (map->dso == dso && map->start < base_addr) if (map->dso == dso && map->start < base_addr)
base_addr = map->start; base_addr = map->start;
} }
......
...@@ -144,10 +144,11 @@ static enum dso_type machine__thread_dso_type(struct machine *machine, ...@@ -144,10 +144,11 @@ static enum dso_type machine__thread_dso_type(struct machine *machine,
struct thread *thread) struct thread *thread)
{ {
enum dso_type dso_type = DSO__TYPE_UNKNOWN; enum dso_type dso_type = DSO__TYPE_UNKNOWN;
struct map *map; struct map_rb_node *rb_node;
maps__for_each_entry(thread->maps, rb_node) {
struct dso *dso = rb_node->map->dso;
maps__for_each_entry(thread->maps, map) {
struct dso *dso = map->dso;
if (!dso || dso->long_name[0] != '/') if (!dso || dso->long_name[0] != '/')
continue; continue;
dso_type = dso__type(dso, machine); dso_type = dso__type(dso, machine);
......