Commit 38f01d8d authored by Jiri Olsa's avatar Jiri Olsa Committed by Arnaldo Carvalho de Melo

libperf: Add perf_cpu_map__get()/perf_cpu_map__put()

Moving the following functions:

  cpu_map__get()
  cpu_map__put()

to libperf with following names:

  perf_cpu_map__get()
  perf_cpu_map__put()

Committer notes:

Added fixes for arm/arm64
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-31-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 397721e0
...@@ -181,7 +181,7 @@ static int cs_etm_set_option(struct auxtrace_record *itr, ...@@ -181,7 +181,7 @@ static int cs_etm_set_option(struct auxtrace_record *itr,
err = 0; err = 0;
out: out:
cpu_map__put(online_cpus); perf_cpu_map__put(online_cpus);
return err; return err;
} }
...@@ -517,7 +517,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, ...@@ -517,7 +517,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
} }
} }
cpu_map__put(online_cpus); perf_cpu_map__put(online_cpus);
return (CS_ETM_HEADER_SIZE + return (CS_ETM_HEADER_SIZE +
(etmv4 * CS_ETMV4_PRIV_SIZE) + (etmv4 * CS_ETMV4_PRIV_SIZE) +
...@@ -679,7 +679,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, ...@@ -679,7 +679,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
if (cpu_map__has(cpu_map, i)) if (cpu_map__has(cpu_map, i))
cs_etm_get_metadata(i, &offset, itr, info); cs_etm_get_metadata(i, &offset, itr, info);
cpu_map__put(online_cpus); perf_cpu_map__put(online_cpus);
return 0; return 0;
} }
......
...@@ -27,7 +27,7 @@ char *get_cpuid_str(struct perf_pmu *pmu) ...@@ -27,7 +27,7 @@ char *get_cpuid_str(struct perf_pmu *pmu)
return NULL; return NULL;
/* read midr from list of cpus mapped to this pmu */ /* read midr from list of cpus mapped to this pmu */
cpus = cpu_map__get(pmu->cpus); cpus = perf_cpu_map__get(pmu->cpus);
for (cpu = 0; cpu < cpus->nr; cpu++) { for (cpu = 0; cpu < cpus->nr; cpu++) {
scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR, scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR,
sysfs, cpus->map[cpu]); sysfs, cpus->map[cpu]);
...@@ -60,6 +60,6 @@ char *get_cpuid_str(struct perf_pmu *pmu) ...@@ -60,6 +60,6 @@ char *get_cpuid_str(struct perf_pmu *pmu)
buf = NULL; buf = NULL;
} }
cpu_map__put(cpus); perf_cpu_map__put(cpus);
return buf; return buf;
} }
...@@ -206,7 +206,7 @@ static int reset_tracing_cpu(void) ...@@ -206,7 +206,7 @@ static int reset_tracing_cpu(void)
int ret; int ret;
ret = set_tracing_cpumask(cpumap); ret = set_tracing_cpumask(cpumap);
cpu_map__put(cpumap); perf_cpu_map__put(cpumap);
return ret; return ret;
} }
......
...@@ -933,8 +933,8 @@ static int perf_stat_init_aggr_mode(void) ...@@ -933,8 +933,8 @@ static int perf_stat_init_aggr_mode(void)
static void perf_stat__exit_aggr_mode(void) static void perf_stat__exit_aggr_mode(void)
{ {
cpu_map__put(stat_config.aggr_map); perf_cpu_map__put(stat_config.aggr_map);
cpu_map__put(stat_config.cpus_aggr_map); perf_cpu_map__put(stat_config.cpus_aggr_map);
stat_config.aggr_map = NULL; stat_config.aggr_map = NULL;
stat_config.cpus_aggr_map = NULL; stat_config.cpus_aggr_map = NULL;
} }
......
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
#include <stdlib.h> #include <stdlib.h>
#include <linux/refcount.h> #include <linux/refcount.h>
#include <internal/cpumap.h> #include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
struct perf_cpu_map *perf_cpu_map__dummy_new(void) struct perf_cpu_map *perf_cpu_map__dummy_new(void)
{ {
...@@ -16,3 +18,25 @@ struct perf_cpu_map *perf_cpu_map__dummy_new(void) ...@@ -16,3 +18,25 @@ struct perf_cpu_map *perf_cpu_map__dummy_new(void)
return cpus; return cpus;
} }
/*
 * Release the storage backing a cpu map. Called only once the last
 * reference is gone; warns if the refcount is unexpectedly non-zero.
 * A NULL map is a no-op.
 */
static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (!map)
		return;

	WARN_ONCE(refcount_read(&map->refcnt) != 0,
		  "cpu_map refcnt unbalanced\n");
	free(map);
}
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
if (map)
refcount_inc(&map->refcnt);
return map;
}
/*
 * Drop one reference on @map, freeing it when the count reaches zero.
 * NULL-safe: passing NULL does nothing.
 */
void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (!map)
		return;

	if (refcount_dec_and_test(&map->refcnt))
		cpu_map__delete(map);
}
...@@ -7,5 +7,7 @@ ...@@ -7,5 +7,7 @@
struct perf_cpu_map; struct perf_cpu_map;
LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void); LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
#endif /* __LIBPERF_CPUMAP_H */ #endif /* __LIBPERF_CPUMAP_H */
...@@ -2,6 +2,8 @@ LIBPERF_0.0.1 { ...@@ -2,6 +2,8 @@ LIBPERF_0.0.1 {
global: global:
libperf_set_print; libperf_set_print;
perf_cpu_map__dummy_new; perf_cpu_map__dummy_new;
perf_cpu_map__get;
perf_cpu_map__put;
local: local:
*; *;
}; };
...@@ -21,7 +21,7 @@ static unsigned long *get_bitmap(const char *str, int nbits) ...@@ -21,7 +21,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
} }
if (map) if (map)
cpu_map__put(map); perf_cpu_map__put(map);
return bm; return bm;
} }
......
...@@ -655,7 +655,7 @@ static int do_test_code_reading(bool try_kcore) ...@@ -655,7 +655,7 @@ static int do_test_code_reading(bool try_kcore)
* and will be freed by following perf_evlist__set_maps * and will be freed by following perf_evlist__set_maps
* call. Getting refference to keep them alive. * call. Getting refference to keep them alive.
*/ */
cpu_map__get(cpus); perf_cpu_map__get(cpus);
thread_map__get(threads); thread_map__get(threads);
perf_evlist__set_maps(evlist, NULL, NULL); perf_evlist__set_maps(evlist, NULL, NULL);
evlist__delete(evlist); evlist__delete(evlist);
...@@ -705,7 +705,7 @@ static int do_test_code_reading(bool try_kcore) ...@@ -705,7 +705,7 @@ static int do_test_code_reading(bool try_kcore)
if (evlist) { if (evlist) {
evlist__delete(evlist); evlist__delete(evlist);
} else { } else {
cpu_map__put(cpus); perf_cpu_map__put(cpus);
thread_map__put(threads); thread_map__put(threads);
} }
machine__delete_threads(machine); machine__delete_threads(machine);
......
...@@ -39,7 +39,7 @@ static int process_event_mask(struct perf_tool *tool __maybe_unused, ...@@ -39,7 +39,7 @@ static int process_event_mask(struct perf_tool *tool __maybe_unused,
TEST_ASSERT_VAL("wrong cpu", map->map[i] == i); TEST_ASSERT_VAL("wrong cpu", map->map[i] == i);
} }
cpu_map__put(map); perf_cpu_map__put(map);
return 0; return 0;
} }
...@@ -68,7 +68,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused, ...@@ -68,7 +68,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
TEST_ASSERT_VAL("wrong cpu", map->map[0] == 1); TEST_ASSERT_VAL("wrong cpu", map->map[0] == 1);
TEST_ASSERT_VAL("wrong cpu", map->map[1] == 256); TEST_ASSERT_VAL("wrong cpu", map->map[1] == 256);
TEST_ASSERT_VAL("wrong refcnt", refcount_read(&map->refcnt) == 1); TEST_ASSERT_VAL("wrong refcnt", refcount_read(&map->refcnt) == 1);
cpu_map__put(map); perf_cpu_map__put(map);
return 0; return 0;
} }
...@@ -83,7 +83,7 @@ int test__cpu_map_synthesize(struct test *test __maybe_unused, int subtest __may ...@@ -83,7 +83,7 @@ int test__cpu_map_synthesize(struct test *test __maybe_unused, int subtest __may
TEST_ASSERT_VAL("failed to synthesize map", TEST_ASSERT_VAL("failed to synthesize map",
!perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL)); !perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL));
cpu_map__put(cpus); perf_cpu_map__put(cpus);
/* This one is better stores in cpu values. */ /* This one is better stores in cpu values. */
cpus = cpu_map__new("1,256"); cpus = cpu_map__new("1,256");
...@@ -91,7 +91,7 @@ int test__cpu_map_synthesize(struct test *test __maybe_unused, int subtest __may ...@@ -91,7 +91,7 @@ int test__cpu_map_synthesize(struct test *test __maybe_unused, int subtest __may
TEST_ASSERT_VAL("failed to synthesize map", TEST_ASSERT_VAL("failed to synthesize map",
!perf_event__synthesize_cpu_map(NULL, cpus, process_event_cpus, NULL)); !perf_event__synthesize_cpu_map(NULL, cpus, process_event_cpus, NULL));
cpu_map__put(cpus); perf_cpu_map__put(cpus);
return 0; return 0;
} }
......
...@@ -132,7 +132,7 @@ static int attach__cpu_disabled(struct evlist *evlist) ...@@ -132,7 +132,7 @@ static int attach__cpu_disabled(struct evlist *evlist)
return err; return err;
} }
cpu_map__put(cpus); perf_cpu_map__put(cpus);
return evsel__enable(evsel); return evsel__enable(evsel);
} }
...@@ -154,7 +154,7 @@ static int attach__cpu_enabled(struct evlist *evlist) ...@@ -154,7 +154,7 @@ static int attach__cpu_enabled(struct evlist *evlist)
if (err == -EACCES) if (err == -EACCES)
return TEST_SKIP; return TEST_SKIP;
cpu_map__put(cpus); perf_cpu_map__put(cpus);
return err ? TEST_FAIL : TEST_OK; return err ? TEST_FAIL : TEST_OK;
} }
......
...@@ -73,7 +73,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused, ...@@ -73,7 +73,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
TEST_ASSERT_VAL("wrong cpus", map->map[0] == 1); TEST_ASSERT_VAL("wrong cpus", map->map[0] == 1);
TEST_ASSERT_VAL("wrong cpus", map->map[1] == 2); TEST_ASSERT_VAL("wrong cpus", map->map[1] == 2);
TEST_ASSERT_VAL("wrong cpus", map->map[2] == 3); TEST_ASSERT_VAL("wrong cpus", map->map[2] == 3);
cpu_map__put(map); perf_cpu_map__put(map);
return 0; return 0;
} }
...@@ -113,6 +113,6 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu ...@@ -113,6 +113,6 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
TEST_ASSERT_VAL("failed to synthesize attr update cpus", TEST_ASSERT_VAL("failed to synthesize attr update cpus",
!perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus)); !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
cpu_map__put(evsel->own_cpus); perf_cpu_map__put(evsel->own_cpus);
return 0; return 0;
} }
...@@ -149,7 +149,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un ...@@ -149,7 +149,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
evlist__disable(evlist); evlist__disable(evlist);
evlist__delete(evlist); evlist__delete(evlist);
} else { } else {
cpu_map__put(cpus); perf_cpu_map__put(cpus);
thread_map__put(threads); thread_map__put(threads);
} }
......
...@@ -32,7 +32,7 @@ static unsigned long *get_bitmap(const char *str, int nbits) ...@@ -32,7 +32,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
} }
if (map) if (map)
cpu_map__put(map); perf_cpu_map__put(map);
else else
free(bm); free(bm);
......
...@@ -155,7 +155,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse ...@@ -155,7 +155,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
cpus = NULL; cpus = NULL;
threads = NULL; threads = NULL;
out_free_cpus: out_free_cpus:
cpu_map__put(cpus); perf_cpu_map__put(cpus);
out_free_threads: out_free_threads:
thread_map__put(threads); thread_map__put(threads);
return err; return err;
......
...@@ -120,7 +120,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int ...@@ -120,7 +120,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
out_evsel_delete: out_evsel_delete:
evsel__delete(evsel); evsel__delete(evsel);
out_cpu_map_delete: out_cpu_map_delete:
cpu_map__put(cpus); perf_cpu_map__put(cpus);
out_thread_map_delete: out_thread_map_delete:
thread_map__put(threads); thread_map__put(threads);
return err; return err;
......
...@@ -125,7 +125,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) ...@@ -125,7 +125,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
} }
out_free_maps: out_free_maps:
cpu_map__put(cpus); perf_cpu_map__put(cpus);
thread_map__put(threads); thread_map__put(threads);
out_delete_evlist: out_delete_evlist:
evlist__delete(evlist); evlist__delete(evlist);
......
...@@ -569,7 +569,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_ ...@@ -569,7 +569,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
evlist__disable(evlist); evlist__disable(evlist);
evlist__delete(evlist); evlist__delete(evlist);
} else { } else {
cpu_map__put(cpus); perf_cpu_map__put(cpus);
thread_map__put(threads); thread_map__put(threads);
} }
......
...@@ -135,7 +135,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused ...@@ -135,7 +135,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
} }
out_free_maps: out_free_maps:
cpu_map__put(cpus); perf_cpu_map__put(cpus);
thread_map__put(threads); thread_map__put(threads);
out_delete_evlist: out_delete_evlist:
evlist__delete(evlist); evlist__delete(evlist);
......
...@@ -133,7 +133,7 @@ int test__session_topology(struct test *test __maybe_unused, int subtest __maybe ...@@ -133,7 +133,7 @@ int test__session_topology(struct test *test __maybe_unused, int subtest __maybe
} }
ret = check_cpu_topology(path, map); ret = check_cpu_topology(path, map);
cpu_map__put(map); perf_cpu_map__put(map);
free_path: free_path:
unlink(path); unlink(path);
......
...@@ -273,28 +273,6 @@ struct perf_cpu_map *cpu_map__empty_new(int nr) ...@@ -273,28 +273,6 @@ struct perf_cpu_map *cpu_map__empty_new(int nr)
return cpus; return cpus;
} }
static void cpu_map__delete(struct perf_cpu_map *map)
{
if (map) {
WARN_ONCE(refcount_read(&map->refcnt) != 0,
"cpu_map refcnt unbalanced\n");
free(map);
}
}
struct perf_cpu_map *cpu_map__get(struct perf_cpu_map *map)
{
if (map)
refcount_inc(&map->refcnt);
return map;
}
void cpu_map__put(struct perf_cpu_map *map)
{
if (map && refcount_dec_and_test(&map->refcnt))
cpu_map__delete(map);
}
static int cpu__get_topology_int(int cpu, const char *name, int *value) static int cpu__get_topology_int(int cpu, const char *name, int *value)
{ {
char path[PATH_MAX]; char path[PATH_MAX];
......
...@@ -29,9 +29,6 @@ int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct perf_cpu_map **diep ...@@ -29,9 +29,6 @@ int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct perf_cpu_map **diep
int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **corep); int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **corep);
const struct perf_cpu_map *cpu_map__online(void); /* thread unsafe */ const struct perf_cpu_map *cpu_map__online(void); /* thread unsafe */
struct perf_cpu_map *cpu_map__get(struct perf_cpu_map *map);
void cpu_map__put(struct perf_cpu_map *map);
static inline int cpu_map__socket(struct perf_cpu_map *sock, int s) static inline int cpu_map__socket(struct perf_cpu_map *sock, int s)
{ {
if (!sock || s > sock->nr || s < 0) if (!sock || s > sock->nr || s < 0)
......
...@@ -219,7 +219,7 @@ struct cpu_topology *cpu_topology__new(void) ...@@ -219,7 +219,7 @@ struct cpu_topology *cpu_topology__new(void)
} }
out_free: out_free:
cpu_map__put(map); perf_cpu_map__put(map);
if (ret) { if (ret) {
cpu_topology__delete(tp); cpu_topology__delete(tp);
tp = NULL; tp = NULL;
...@@ -335,7 +335,7 @@ struct numa_topology *numa_topology__new(void) ...@@ -335,7 +335,7 @@ struct numa_topology *numa_topology__new(void)
out: out:
free(buf); free(buf);
fclose(fp); fclose(fp);
cpu_map__put(node_map); perf_cpu_map__put(node_map);
return tp; return tp;
} }
......
...@@ -179,7 +179,7 @@ void perf_env__exit(struct perf_env *env) ...@@ -179,7 +179,7 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->cpu); zfree(&env->cpu);
for (i = 0; i < env->nr_numa_nodes; i++) for (i = 0; i < env->nr_numa_nodes; i++)
cpu_map__put(env->numa_nodes[i].map); perf_cpu_map__put(env->numa_nodes[i].map);
zfree(&env->numa_nodes); zfree(&env->numa_nodes);
for (i = 0; i < env->caches_cnt; i++) for (i = 0; i < env->caches_cnt; i++)
......
...@@ -1403,7 +1403,7 @@ size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp) ...@@ -1403,7 +1403,7 @@ size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
else else
ret += fprintf(fp, "failed to get cpumap from event\n"); ret += fprintf(fp, "failed to get cpumap from event\n");
cpu_map__put(cpus); perf_cpu_map__put(cpus);
return ret; return ret;
} }
......
...@@ -141,7 +141,7 @@ void evlist__delete(struct evlist *evlist) ...@@ -141,7 +141,7 @@ void evlist__delete(struct evlist *evlist)
perf_evlist__munmap(evlist); perf_evlist__munmap(evlist);
evlist__close(evlist); evlist__close(evlist);
cpu_map__put(evlist->cpus); perf_cpu_map__put(evlist->cpus);
thread_map__put(evlist->threads); thread_map__put(evlist->threads);
evlist->cpus = NULL; evlist->cpus = NULL;
evlist->threads = NULL; evlist->threads = NULL;
...@@ -158,11 +158,11 @@ static void __perf_evlist__propagate_maps(struct evlist *evlist, ...@@ -158,11 +158,11 @@ static void __perf_evlist__propagate_maps(struct evlist *evlist,
* keep it, if there's no target cpu list defined. * keep it, if there's no target cpu list defined.
*/ */
if (!evsel->own_cpus || evlist->has_user_cpus) { if (!evsel->own_cpus || evlist->has_user_cpus) {
cpu_map__put(evsel->cpus); perf_cpu_map__put(evsel->cpus);
evsel->cpus = cpu_map__get(evlist->cpus); evsel->cpus = perf_cpu_map__get(evlist->cpus);
} else if (evsel->cpus != evsel->own_cpus) { } else if (evsel->cpus != evsel->own_cpus) {
cpu_map__put(evsel->cpus); perf_cpu_map__put(evsel->cpus);
evsel->cpus = cpu_map__get(evsel->own_cpus); evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
} }
thread_map__put(evsel->threads); thread_map__put(evsel->threads);
...@@ -1115,8 +1115,8 @@ void perf_evlist__set_maps(struct evlist *evlist, struct perf_cpu_map *cpus, ...@@ -1115,8 +1115,8 @@ void perf_evlist__set_maps(struct evlist *evlist, struct perf_cpu_map *cpus,
* the caller to increase the reference count. * the caller to increase the reference count.
*/ */
if (cpus != evlist->cpus) { if (cpus != evlist->cpus) {
cpu_map__put(evlist->cpus); perf_cpu_map__put(evlist->cpus);
evlist->cpus = cpu_map__get(cpus); evlist->cpus = perf_cpu_map__get(cpus);
} }
if (threads != evlist->threads) { if (threads != evlist->threads) {
...@@ -1383,7 +1383,7 @@ static int perf_evlist__create_syswide_maps(struct evlist *evlist) ...@@ -1383,7 +1383,7 @@ static int perf_evlist__create_syswide_maps(struct evlist *evlist)
out: out:
return err; return err;
out_put: out_put:
cpu_map__put(cpus); perf_cpu_map__put(cpus);
goto out; goto out;
} }
......
...@@ -1325,8 +1325,8 @@ void perf_evsel__exit(struct evsel *evsel) ...@@ -1325,8 +1325,8 @@ void perf_evsel__exit(struct evsel *evsel)
perf_evsel__free_id(evsel); perf_evsel__free_id(evsel);
perf_evsel__free_config_terms(evsel); perf_evsel__free_config_terms(evsel);
cgroup__put(evsel->cgrp); cgroup__put(evsel->cgrp);
cpu_map__put(evsel->cpus); perf_cpu_map__put(evsel->cpus);
cpu_map__put(evsel->own_cpus); perf_cpu_map__put(evsel->own_cpus);
thread_map__put(evsel->threads); thread_map__put(evsel->threads);
zfree(&evsel->group_name); zfree(&evsel->group_name);
zfree(&evsel->name); zfree(&evsel->name);
......
...@@ -332,8 +332,8 @@ __add_event(struct list_head *list, int *idx, ...@@ -332,8 +332,8 @@ __add_event(struct list_head *list, int *idx,
return NULL; return NULL;
(*idx)++; (*idx)++;
evsel->cpus = cpu_map__get(cpus); evsel->cpus = perf_cpu_map__get(cpus);
evsel->own_cpus = cpu_map__get(cpus); evsel->own_cpus = perf_cpu_map__get(cpus);
evsel->system_wide = pmu ? pmu->is_uncore : false; evsel->system_wide = pmu ? pmu->is_uncore : false;
evsel->auto_merge_stats = auto_merge_stats; evsel->auto_merge_stats = auto_merge_stats;
......
...@@ -626,7 +626,7 @@ static bool pmu_is_uncore(const char *name) ...@@ -626,7 +626,7 @@ static bool pmu_is_uncore(const char *name)
snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name); snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
cpus = __pmu_cpumask(path); cpus = __pmu_cpumask(path);
cpu_map__put(cpus); perf_cpu_map__put(cpus);
return !!cpus; return !!cpus;
} }
......
...@@ -557,7 +557,7 @@ static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus, ...@@ -557,7 +557,7 @@ static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus) static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{ {
cpu_map__put(pcpus->cpus); perf_cpu_map__put(pcpus->cpus);
Py_TYPE(pcpus)->tp_free((PyObject*)pcpus); Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
} }
......
...@@ -67,7 +67,7 @@ static bool perf_probe_api(setup_probe_fn_t fn) ...@@ -67,7 +67,7 @@ static bool perf_probe_api(setup_probe_fn_t fn)
if (!cpus) if (!cpus)
return false; return false;
cpu = cpus->map[0]; cpu = cpus->map[0];
cpu_map__put(cpus); perf_cpu_map__put(cpus);
do { do {
ret = perf_do_probe_api(fn, cpu, try[i++]); ret = perf_do_probe_api(fn, cpu, try[i++]);
...@@ -122,7 +122,7 @@ bool perf_can_record_cpu_wide(void) ...@@ -122,7 +122,7 @@ bool perf_can_record_cpu_wide(void)
if (!cpus) if (!cpus)
return false; return false;
cpu = cpus->map[0]; cpu = cpus->map[0];
cpu_map__put(cpus); perf_cpu_map__put(cpus);
fd = sys_perf_event_open(&attr, -1, cpu, -1, 0); fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
if (fd < 0) if (fd < 0)
...@@ -278,7 +278,7 @@ bool perf_evlist__can_select_event(struct evlist *evlist, const char *str) ...@@ -278,7 +278,7 @@ bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
struct perf_cpu_map *cpus = cpu_map__new(NULL); struct perf_cpu_map *cpus = cpu_map__new(NULL);
cpu = cpus ? cpus->map[0] : 0; cpu = cpus ? cpus->map[0] : 0;
cpu_map__put(cpus); perf_cpu_map__put(cpus);
} else { } else {
cpu = evlist->cpus->map[0]; cpu = evlist->cpus->map[0];
} }
......
...@@ -2310,7 +2310,7 @@ int perf_session__cpu_bitmap(struct perf_session *session, ...@@ -2310,7 +2310,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
err = 0; err = 0;
out_delete_map: out_delete_map:
cpu_map__put(map); perf_cpu_map__put(map);
return err; return err;
} }
......
...@@ -745,7 +745,7 @@ static int str_to_bitmap(char *s, cpumask_t *b) ...@@ -745,7 +745,7 @@ static int str_to_bitmap(char *s, cpumask_t *b)
set_bit(c, cpumask_bits(b)); set_bit(c, cpumask_bits(b));
} }
cpu_map__put(m); perf_cpu_map__put(m);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment