Commit 5ab6d715 authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf maps: Add functions to access maps

Introduce functions to access struct maps. These functions reduce the
number of places reference counting is necessary. While tidying APIs do
some small const-ification, in particular to unwind_libunwind_ops.

Committer notes:

Fixed up tools/perf/util/unwind-libunwind.c:

-               return ops->get_entries(cb, arg, thread, data, max_stack);
+               return ops->get_entries(cb, arg, thread, data, max_stack, best_effort);
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: German Gomez <german.gomez@arm.com>
Cc: Hao Luo <haoluo@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Miaoqian Lin <linmq006@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Shunsuke Nakamura <nakamura.shun@fujitsu.com>
Cc: Song Liu <song@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Stephen Brennan <stephen.s.brennan@oracle.com>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Yury Norov <yury.norov@gmail.com>
Link: https://lore.kernel.org/r/20230320212248.1175731-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent ff583dc4
......@@ -100,10 +100,11 @@ static PyObject *perf_sample_insn(PyObject *obj, PyObject *args)
if (!c)
return NULL;
if (c->sample->ip && !c->sample->insn_len &&
c->al->thread->maps && c->al->thread->maps->machine)
script_fetch_insn(c->sample, c->al->thread, c->al->thread->maps->machine);
if (c->sample->ip && !c->sample->insn_len && c->al->thread->maps) {
struct machine *machine = maps__machine(c->al->thread->maps);
script_fetch_insn(c->sample, c->al->thread, machine);
}
if (!c->sample->insn_len)
Py_RETURN_NONE; /* N.B. This is a return statement */
......
......@@ -269,7 +269,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
len = al.map->end - addr;
/* Read the object code using perf */
ret_len = dso__data_read_offset(al.map->dso, thread->maps->machine,
ret_len = dso__data_read_offset(al.map->dso, maps__machine(thread->maps),
al.addr, buf1, len);
if (ret_len != len) {
pr_debug("dso__data_read_offset failed\n");
......
......@@ -3139,7 +3139,8 @@ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *h
continue;
case 'k':
if (browser->selection != NULL)
hists_browser__zoom_map(browser, browser->selection->maps->machine->vmlinux_map);
hists_browser__zoom_map(browser,
maps__machine(browser->selection->maps)->vmlinux_map);
continue;
case 'V':
verbose = (verbose + 1) % 4;
......
......@@ -1112,6 +1112,8 @@ int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *samp
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
bool hide_unresolved)
{
struct machine *machine = maps__machine(node->ms.maps);
al->maps = node->ms.maps;
al->map = node->ms.map;
al->sym = node->ms.sym;
......@@ -1124,9 +1126,8 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
if (al->map == NULL)
goto out;
}
if (al->maps == machine__kernel_maps(al->maps->machine)) {
if (machine__is_host(al->maps->machine)) {
if (al->maps == machine__kernel_maps(machine)) {
if (machine__is_host(machine)) {
al->cpumode = PERF_RECORD_MISC_KERNEL;
al->level = 'k';
} else {
......@@ -1134,7 +1135,7 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
al->level = 'g';
}
} else {
if (machine__is_host(al->maps->machine)) {
if (machine__is_host(machine)) {
al->cpumode = PERF_RECORD_MISC_USER;
al->level = '.';
} else if (perf_guest) {
......
......@@ -181,7 +181,7 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
if (al->map) {
struct dso *dso = al->map->dso;
err = db_export__dso(dbe, dso, al->maps->machine);
err = db_export__dso(dbe, dso, maps__machine(al->maps));
if (err)
return err;
*dso_db_id = dso->db_id;
......@@ -354,19 +354,21 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
};
struct thread *main_thread;
struct comm *comm = NULL;
struct machine *machine;
int err;
err = db_export__evsel(dbe, evsel);
if (err)
return err;
err = db_export__machine(dbe, al->maps->machine);
machine = maps__machine(al->maps);
err = db_export__machine(dbe, machine);
if (err)
return err;
main_thread = thread__main_thread(al->maps->machine, thread);
main_thread = thread__main_thread(machine, thread);
err = db_export__threads(dbe, thread, main_thread, al->maps->machine, &comm);
err = db_export__threads(dbe, thread, main_thread, machine, &comm);
if (err)
goto out_put;
......@@ -380,7 +382,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
goto out_put;
if (dbe->cpr) {
struct call_path *cp = call_path_from_sample(dbe, al->maps->machine,
struct call_path *cp = call_path_from_sample(dbe, machine,
thread, sample,
evsel);
if (cp) {
......
......@@ -197,8 +197,12 @@ static const __u8 *dlfilter__insn(void *ctx, __u32 *len)
if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
return NULL;
if (al->thread->maps && al->thread->maps->machine)
script_fetch_insn(d->sample, al->thread, al->thread->maps->machine);
if (al->thread->maps) {
struct machine *machine = maps__machine(al->thread->maps);
if (machine)
script_fetch_insn(d->sample, al->thread, machine);
}
}
if (!d->sample->insn_len)
......
......@@ -572,7 +572,7 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
struct maps *maps = thread->maps;
struct machine *machine = maps->machine;
struct machine *machine = maps__machine(maps);
bool load_map = false;
al->maps = maps;
......@@ -637,7 +637,7 @@ struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
struct map *map = thread__find_map(thread, cpumode, addr, al);
struct machine *machine = thread->maps->machine;
struct machine *machine = maps__machine(thread->maps);
u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
if (map || addr_cpumode == cpumode)
......
......@@ -241,7 +241,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
if (h->cgroup) {
const char *cgrp_name = "unknown";
struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
h->cgroup);
if (cgrp != NULL)
cgrp_name = cgrp->name;
......
......@@ -2847,7 +2847,7 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
static u64 get_leaf_frame_caller(struct perf_sample *sample,
struct thread *thread, int usr_idx)
{
if (machine__normalized_is(thread->maps->machine, "arm64"))
if (machine__normalized_is(maps__machine(thread->maps), "arm64"))
return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
else
return 0;
......
......@@ -234,7 +234,7 @@ bool __map__is_kernel(const struct map *map)
{
if (!map->dso->kernel)
return false;
return machine__kernel_map(map__kmaps((struct map *)map)->machine) == map;
return machine__kernel_map(maps__machine(map__kmaps((struct map *)map))) == map;
}
bool __map__is_extra_kernel_map(const struct map *map)
......@@ -475,11 +475,15 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
* kcore may not either. However the trampoline object code is on the
* main kernel map, so just use that instead.
*/
if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);
if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps) {
struct machine *machine = maps__machine(kmap->kmaps);
if (kernel_map)
map = kernel_map;
if (machine) {
struct map *kernel_map = machine__kernel_map(machine);
if (kernel_map)
map = kernel_map;
}
}
if (!map->dso->adjust_symbols)
......
......@@ -13,7 +13,7 @@
static void maps__init(struct maps *maps, struct machine *machine)
{
maps->entries = RB_ROOT;
init_rwsem(&maps->lock);
init_rwsem(maps__lock(maps));
maps->machine = machine;
maps->last_search_by_name = NULL;
maps->nr_maps = 0;
......@@ -32,7 +32,7 @@ static void __maps__free_maps_by_name(struct maps *maps)
static int __maps__insert(struct maps *maps, struct map *map)
{
struct rb_node **p = &maps->entries.rb_node;
struct rb_node **p = &maps__entries(maps)->rb_node;
struct rb_node *parent = NULL;
const u64 ip = map->start;
struct map_rb_node *m, *new_rb_node;
......@@ -54,7 +54,7 @@ static int __maps__insert(struct maps *maps, struct map *map)
}
rb_link_node(&new_rb_node->rb_node, parent, p);
rb_insert_color(&new_rb_node->rb_node, &maps->entries);
rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
map__get(map);
return 0;
}
......@@ -63,7 +63,7 @@ int maps__insert(struct maps *maps, struct map *map)
{
int err;
down_write(&maps->lock);
down_write(maps__lock(maps));
err = __maps__insert(maps, map);
if (err)
goto out;
......@@ -84,10 +84,11 @@ int maps__insert(struct maps *maps, struct map *map)
* If we already performed some search by name, then we need to add the just
* inserted map and resort.
*/
if (maps->maps_by_name) {
if (maps->nr_maps > maps->nr_maps_allocated) {
int nr_allocate = maps->nr_maps * 2;
struct map **maps_by_name = realloc(maps->maps_by_name, nr_allocate * sizeof(map));
if (maps__maps_by_name(maps)) {
if (maps__nr_maps(maps) > maps->nr_maps_allocated) {
int nr_allocate = maps__nr_maps(maps) * 2;
struct map **maps_by_name = realloc(maps__maps_by_name(maps),
nr_allocate * sizeof(map));
if (maps_by_name == NULL) {
__maps__free_maps_by_name(maps);
......@@ -97,18 +98,18 @@ int maps__insert(struct maps *maps, struct map *map)
maps->maps_by_name = maps_by_name;
maps->nr_maps_allocated = nr_allocate;
}
maps->maps_by_name[maps->nr_maps - 1] = map;
}
maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map;
__maps__sort_by_name(maps);
}
out:
up_write(&maps->lock);
up_write(maps__lock(maps));
return err;
}
static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
{
rb_erase_init(&rb_node->rb_node, &maps->entries);
rb_erase_init(&rb_node->rb_node, maps__entries(maps));
map__put(rb_node->map);
free(rb_node);
}
......@@ -117,7 +118,7 @@ void maps__remove(struct maps *maps, struct map *map)
{
struct map_rb_node *rb_node;
down_write(&maps->lock);
down_write(maps__lock(maps));
if (maps->last_search_by_name == map)
maps->last_search_by_name = NULL;
......@@ -125,9 +126,9 @@ void maps__remove(struct maps *maps, struct map *map)
assert(rb_node->map == map);
__maps__remove(maps, rb_node);
--maps->nr_maps;
if (maps->maps_by_name)
if (maps__maps_by_name(maps))
__maps__free_maps_by_name(maps);
up_write(&maps->lock);
up_write(maps__lock(maps));
}
static void __maps__purge(struct maps *maps)
......@@ -135,7 +136,7 @@ static void __maps__purge(struct maps *maps)
struct map_rb_node *pos, *next;
maps__for_each_entry_safe(maps, pos, next) {
rb_erase_init(&pos->rb_node, &maps->entries);
rb_erase_init(&pos->rb_node, maps__entries(maps));
map__put(pos->map);
free(pos);
}
......@@ -143,9 +144,9 @@ static void __maps__purge(struct maps *maps)
static void maps__exit(struct maps *maps)
{
down_write(&maps->lock);
down_write(maps__lock(maps));
__maps__purge(maps);
up_write(&maps->lock);
up_write(maps__lock(maps));
}
bool maps__empty(struct maps *maps)
......@@ -170,6 +171,14 @@ void maps__delete(struct maps *maps)
free(maps);
}
/*
 * Grab a reference on @maps.  NULL-safe: a NULL @maps is returned unchanged
 * and no reference is taken.  Pair each call with maps__put() to release.
 */
struct maps *maps__get(struct maps *maps)
{
if (maps)
refcount_inc(&maps->refcnt);
return maps;
}
void maps__put(struct maps *maps)
{
if (maps && refcount_dec_and_test(&maps->refcnt))
......@@ -195,7 +204,7 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, st
struct symbol *sym;
struct map_rb_node *pos;
down_read(&maps->lock);
down_read(maps__lock(maps));
maps__for_each_entry(maps, pos) {
sym = map__find_symbol_by_name(pos->map, name);
......@@ -213,7 +222,7 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, st
sym = NULL;
out:
up_read(&maps->lock);
up_read(maps__lock(maps));
return sym;
}
......@@ -238,7 +247,7 @@ size_t maps__fprintf(struct maps *maps, FILE *fp)
size_t printed = 0;
struct map_rb_node *pos;
down_read(&maps->lock);
down_read(maps__lock(maps));
maps__for_each_entry(maps, pos) {
printed += fprintf(fp, "Map:");
......@@ -249,7 +258,7 @@ size_t maps__fprintf(struct maps *maps, FILE *fp)
}
}
up_read(&maps->lock);
up_read(maps__lock(maps));
return printed;
}
......@@ -260,9 +269,9 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
struct rb_node *next, *first;
int err = 0;
down_write(&maps->lock);
down_write(maps__lock(maps));
root = &maps->entries;
root = maps__entries(maps);
/*
* Find first map where end > map->start.
......@@ -358,7 +367,7 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
err = 0;
out:
up_write(&maps->lock);
up_write(maps__lock(maps));
return err;
}
......@@ -371,7 +380,7 @@ int maps__clone(struct thread *thread, struct maps *parent)
int err;
struct map_rb_node *rb_node;
down_read(&parent->lock);
down_read(maps__lock(parent));
maps__for_each_entry(parent, rb_node) {
struct map *new = map__clone(rb_node->map);
......@@ -394,7 +403,7 @@ int maps__clone(struct thread *thread, struct maps *parent)
err = 0;
out_unlock:
up_read(&parent->lock);
up_read(maps__lock(parent));
return err;
}
......@@ -415,9 +424,9 @@ struct map *maps__find(struct maps *maps, u64 ip)
struct map_rb_node *m;
down_read(&maps->lock);
down_read(maps__lock(maps));
p = maps->entries.rb_node;
p = maps__entries(maps)->rb_node;
while (p != NULL) {
m = rb_entry(p, struct map_rb_node, rb_node);
if (ip < m->map->start)
......@@ -430,13 +439,13 @@ struct map *maps__find(struct maps *maps, u64 ip)
m = NULL;
out:
up_read(&maps->lock);
up_read(maps__lock(maps));
return m ? m->map : NULL;
}
struct map_rb_node *maps__first(struct maps *maps)
{
struct rb_node *first = rb_first(&maps->entries);
struct rb_node *first = rb_first(maps__entries(maps));
if (first)
return rb_entry(first, struct map_rb_node, rb_node);
......
......@@ -43,7 +43,7 @@ struct maps {
unsigned int nr_maps_allocated;
#ifdef HAVE_LIBUNWIND_SUPPORT
void *addr_space;
struct unwind_libunwind_ops *unwind_libunwind_ops;
const struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
};
......@@ -58,20 +58,51 @@ struct kmap {
struct maps *maps__new(struct machine *machine);
void maps__delete(struct maps *maps);
bool maps__empty(struct maps *maps);
int maps__clone(struct thread *thread, struct maps *parent);
struct maps *maps__get(struct maps *maps);
void maps__put(struct maps *maps);
static inline struct maps *maps__get(struct maps *maps)
static inline struct rb_root *maps__entries(struct maps *maps)
{
if (maps)
refcount_inc(&maps->refcnt);
return maps;
return &maps->entries;
}
void maps__put(struct maps *maps);
int maps__clone(struct thread *thread, struct maps *parent);
/* Accessor: the machine that owns this maps address space. */
static inline struct machine *maps__machine(struct maps *maps)
{
return maps->machine;
}
/*
 * Accessor: the rw_semaphore taken (down_read/down_write) around
 * traversal and mutation of the maps' entries.
 */
static inline struct rw_semaphore *maps__lock(struct maps *maps)
{
return &maps->lock;
}
/*
 * Accessor: array of maps sorted by name, built lazily on the first
 * name lookup (see __maps__find_by_name); may be NULL before that.
 */
static inline struct map **maps__maps_by_name(struct maps *maps)
{
return maps->maps_by_name;
}
/* Accessor: current number of maps held in @maps. */
static inline unsigned int maps__nr_maps(const struct maps *maps)
{
return maps->nr_maps;
}
#ifdef HAVE_LIBUNWIND_SUPPORT
/*
 * Accessor: the libunwind address space cached on @maps (created in
 * _unwind__prepare_access); NULL until unwind access is prepared.
 */
static inline void *maps__addr_space(struct maps *maps)
{
return maps->addr_space;
}
/*
 * Accessor: the unwind ops registered for @maps; NULL until
 * unwind__prepare_access() has registered an implementation.
 */
static inline const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
{
return maps->unwind_libunwind_ops;
}
#endif
size_t maps__fprintf(struct maps *maps, FILE *fp);
int maps__insert(struct maps *maps, struct map *map);
void maps__remove(struct maps *maps, struct map *map);
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp);
......
......@@ -1288,7 +1288,7 @@ static void python_export_sample_table(struct db_export *dbe,
tuple_set_d64(t, 0, es->db_id);
tuple_set_d64(t, 1, es->evsel->db_id);
tuple_set_d64(t, 2, es->al->maps->machine->db_id);
tuple_set_d64(t, 2, maps__machine(es->al->maps)->db_id);
tuple_set_d64(t, 3, es->al->thread->db_id);
tuple_set_d64(t, 4, es->comm_db_id);
tuple_set_d64(t, 5, es->dso_db_id);
......
......@@ -803,7 +803,7 @@ static int hist_entry__cgroup_snprintf(struct hist_entry *he,
const char *cgrp_name = "N/A";
if (he->cgroup) {
struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env,
struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
he->cgroup);
if (cgrp != NULL)
cgrp_name = cgrp->name;
......
......@@ -1428,7 +1428,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
* we still are sure to have a reference to this DSO via
* *curr_map->dso.
*/
dsos__add(&kmaps->machine->dsos, curr_dso);
dsos__add(&maps__machine(kmaps)->dsos, curr_dso);
/* kmaps already got it */
map__put(curr_map);
dso__set_loaded(curr_dso);
......
......@@ -275,7 +275,7 @@ void maps__fixup_end(struct maps *maps)
{
struct map_rb_node *prev = NULL, *curr;
down_write(&maps->lock);
down_write(maps__lock(maps));
maps__for_each_entry(maps, curr) {
if (prev != NULL && !prev->map->end)
......@@ -291,7 +291,7 @@ void maps__fixup_end(struct maps *maps)
if (curr && !curr->map->end)
curr->map->end = ~0ULL;
up_write(&maps->lock);
up_write(maps__lock(maps));
}
struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
......@@ -844,7 +844,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
if (!kmaps)
return -1;
machine = kmaps->machine;
machine = maps__machine(kmaps);
x86_64 = machine__is(machine, "x86_64");
......@@ -968,7 +968,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
if (curr_map != initial_map &&
dso->kernel == DSO_SPACE__KERNEL_GUEST &&
machine__is_default_guest(kmaps->machine)) {
machine__is_default_guest(maps__machine(kmaps))) {
dso__set_loaded(curr_map->dso);
}
......@@ -1365,7 +1365,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
if (!kmaps)
return -EINVAL;
machine = kmaps->machine;
machine = maps__machine(kmaps);
/* This function requires that the map is the kernel map */
if (!__map__is_kernel(map))
......@@ -1894,7 +1894,7 @@ int dso__load(struct dso *dso, struct map *map)
else if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
ret = dso__load_guest_kernel_sym(dso, map);
machine = map__kmaps(map)->machine;
machine = maps__machine(map__kmaps(map));
if (machine__is(machine, "x86_64"))
machine__map_x86_64_entry_trampolines(machine, dso);
goto out;
......@@ -2059,32 +2059,32 @@ static int map__strcmp_name(const void *name, const void *b)
void __maps__sort_by_name(struct maps *maps)
{
qsort(maps->maps_by_name, maps->nr_maps, sizeof(struct map *), map__strcmp);
qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
}
static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
{
struct map_rb_node *rb_node;
struct map **maps_by_name = realloc(maps->maps_by_name,
maps->nr_maps * sizeof(struct map *));
struct map **maps_by_name = realloc(maps__maps_by_name(maps),
maps__nr_maps(maps) * sizeof(struct map *));
int i = 0;
if (maps_by_name == NULL)
return -1;
up_read(&maps->lock);
down_write(&maps->lock);
up_read(maps__lock(maps));
down_write(maps__lock(maps));
maps->maps_by_name = maps_by_name;
maps->nr_maps_allocated = maps->nr_maps;
maps->nr_maps_allocated = maps__nr_maps(maps);
maps__for_each_entry(maps, rb_node)
maps_by_name[i++] = rb_node->map;
__maps__sort_by_name(maps);
up_write(&maps->lock);
down_read(&maps->lock);
up_write(maps__lock(maps));
down_read(maps__lock(maps));
return 0;
}
......@@ -2093,11 +2093,12 @@ static struct map *__maps__find_by_name(struct maps *maps, const char *name)
{
struct map **mapp;
if (maps->maps_by_name == NULL &&
if (maps__maps_by_name(maps) == NULL &&
map__groups__sort_by_name_from_rbtree(maps))
return NULL;
mapp = bsearch(name, maps->maps_by_name, maps->nr_maps, sizeof(*mapp), map__strcmp_name);
mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
sizeof(*mapp), map__strcmp_name);
if (mapp)
return *mapp;
return NULL;
......@@ -2108,9 +2109,10 @@ struct map *maps__find_by_name(struct maps *maps, const char *name)
struct map_rb_node *rb_node;
struct map *map;
down_read(&maps->lock);
down_read(maps__lock(maps));
if (maps->last_search_by_name && strcmp(maps->last_search_by_name->dso->short_name, name) == 0) {
if (maps->last_search_by_name &&
strcmp(maps->last_search_by_name->dso->short_name, name) == 0) {
map = maps->last_search_by_name;
goto out_unlock;
}
......@@ -2120,7 +2122,7 @@ struct map *maps__find_by_name(struct maps *maps, const char *name)
* made.
*/
map = __maps__find_by_name(maps, name);
if (map || maps->maps_by_name != NULL)
if (map || maps__maps_by_name(maps) != NULL)
goto out_unlock;
/* Fallback to traversing the rbtree... */
......@@ -2134,7 +2136,7 @@ struct map *maps__find_by_name(struct maps *maps, const char *name)
map = NULL;
out_unlock:
up_read(&maps->lock);
up_read(maps__lock(maps));
return map;
}
......@@ -2386,7 +2388,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
int err;
const char *kallsyms_filename;
struct machine *machine = map__kmaps(map)->machine;
struct machine *machine = maps__machine(map__kmaps(map));
char path[PATH_MAX];
if (machine->kallsyms_filename) {
......
......@@ -155,8 +155,8 @@ static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
ts->br_stack_sz = br_stack_sz;
}
if (thread->maps && thread->maps->machine) {
struct machine *machine = thread->maps->machine;
if (thread->maps && maps__machine(thread->maps)) {
struct machine *machine = maps__machine(thread->maps);
const char *arch = perf_env__arch(machine->env);
ts->kernel_start = machine__kernel_start(machine);
......
......@@ -362,7 +362,7 @@ static int __thread__prepare_access(struct thread *thread)
struct maps *maps = thread->maps;
struct map_rb_node *rb_node;
down_read(&maps->lock);
down_read(maps__lock(maps));
maps__for_each_entry(maps, rb_node) {
err = unwind__prepare_access(thread->maps, rb_node->map, &initialized);
......@@ -370,7 +370,7 @@ static int __thread__prepare_access(struct thread *thread)
break;
}
up_read(&maps->lock);
up_read(maps__lock(maps));
return err;
}
......
......@@ -667,24 +667,26 @@ static unw_accessors_t accessors = {
static int _unwind__prepare_access(struct maps *maps)
{
maps->addr_space = unw_create_addr_space(&accessors, 0);
if (!maps->addr_space) {
void *addr_space = unw_create_addr_space(&accessors, 0);
maps->addr_space = addr_space;
if (!addr_space) {
pr_err("unwind: Can't create unwind address space.\n");
return -ENOMEM;
}
unw_set_caching_policy(maps->addr_space, UNW_CACHE_GLOBAL);
unw_set_caching_policy(addr_space, UNW_CACHE_GLOBAL);
return 0;
}
static void _unwind__flush_access(struct maps *maps)
{
unw_flush_cache(maps->addr_space, 0, 0);
unw_flush_cache(maps__addr_space(maps), 0, 0);
}
static void _unwind__finish_access(struct maps *maps)
{
unw_destroy_addr_space(maps->addr_space);
unw_destroy_addr_space(maps__addr_space(maps));
}
static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
......@@ -709,7 +711,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
*/
if (max_stack - 1 > 0) {
WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
addr_space = ui->thread->maps->addr_space;
addr_space = maps__addr_space(ui->thread->maps);
if (addr_space == NULL)
return -1;
......@@ -759,7 +761,7 @@ static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct unwind_info ui = {
.sample = data,
.thread = thread,
.machine = thread->maps->machine,
.machine = maps__machine(thread->maps),
.best_effort = best_effort
};
......
......@@ -22,12 +22,13 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized
const char *arch;
enum dso_type dso_type;
struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
struct machine *machine;
int err;
if (!dwarf_callchain_users)
return 0;
if (maps->addr_space) {
if (maps__addr_space(maps)) {
pr_debug("unwind: thread map already set, dso=%s\n",
map->dso->name);
if (initialized)
......@@ -35,15 +36,16 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized
return 0;
}
machine = maps__machine(maps);
/* env->arch is NULL for live-mode (i.e. perf top) */
if (!maps->machine->env || !maps->machine->env->arch)
if (!machine->env || !machine->env->arch)
goto out_register;
dso_type = dso__type(map->dso, maps->machine);
dso_type = dso__type(map->dso, machine);
if (dso_type == DSO__TYPE_UNKNOWN)
return 0;
arch = perf_env__arch(maps->machine->env);
arch = perf_env__arch(machine->env);
if (!strcmp(arch, "x86")) {
if (dso_type != DSO__TYPE_64BIT)
......@@ -60,7 +62,7 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized
out_register:
unwind__register_ops(maps, ops);
err = maps->unwind_libunwind_ops->prepare_access(maps);
err = maps__unwind_libunwind_ops(maps)->prepare_access(maps);
if (initialized)
*initialized = err ? false : true;
return err;
......@@ -68,14 +70,18 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized
void unwind__flush_access(struct maps *maps)
{
if (maps->unwind_libunwind_ops)
maps->unwind_libunwind_ops->flush_access(maps);
const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);
if (ops)
ops->flush_access(maps);
}
void unwind__finish_access(struct maps *maps)
{
if (maps->unwind_libunwind_ops)
maps->unwind_libunwind_ops->finish_access(maps);
const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);
if (ops)
ops->finish_access(maps);
}
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
......@@ -83,8 +89,9 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct perf_sample *data, int max_stack,
bool best_effort)
{
if (thread->maps->unwind_libunwind_ops)
return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data,
max_stack, best_effort);
const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(thread->maps);
if (ops)
return ops->get_entries(cb, arg, thread, data, max_stack, best_effort);
return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment