Commit 6f844b1f authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf evsel: Rename variable cpu to index

Make naming less error prone.
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: John Garry <john.garry@huawei.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Clarke <pc@us.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Vineet Singh <vineet.singh@intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: zhengjun.xing@intel.com
Link: https://lore.kernel.org/r/20220105061351.120843-40-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 1fa497d4
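The rename draws a line between an index into an evsel's perf_cpu_map and the CPU number stored at that index; the two only coincide when the map happens to contain every CPU starting from 0. A minimal sketch of the distinction using libperf's public cpumap API (assuming the API of this era, where perf_cpu_map__cpu() returns the CPU number as a plain int, and linking against libperf with -lperf):

#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	/* NULL asks for all online CPUs; indices 0..nr-1 need not equal CPU numbers. */
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
	int idx;

	for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
		/* The value stored at a map index is the real CPU number. */
		int cpu = perf_cpu_map__cpu(cpus, idx);

		printf("cpu_map_idx %d -> cpu %d\n", idx, cpu);
	}

	perf_cpu_map__put(cpus);
	return 0;
}

With that distinction in mind, the diff below renames parameters and locals that are really map indices to cpu_map_idx (or idx in loops), while expressions that produce actual CPU numbers, such as cpus->map[idx], are left alone.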
@@ -1372,9 +1372,9 @@ int evsel__append_addr_filter(struct evsel *evsel, const char *filter)
 }
 
 /* Caller has to clear disabled after going through all CPUs. */
-int evsel__enable_cpu(struct evsel *evsel, int cpu)
+int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx)
 {
-	return perf_evsel__enable_cpu(&evsel->core, cpu);
+	return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
 }
 
 int evsel__enable(struct evsel *evsel)
@@ -1387,9 +1387,9 @@ int evsel__enable(struct evsel *evsel)
 }
 
 /* Caller has to set disabled after going through all CPUs. */
-int evsel__disable_cpu(struct evsel *evsel, int cpu)
+int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx)
 {
-	return perf_evsel__disable_cpu(&evsel->core, cpu);
+	return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx);
 }
 
 int evsel__disable(struct evsel *evsel)
@@ -1455,7 +1455,7 @@ void evsel__delete(struct evsel *evsel)
 	free(evsel);
 }
 
-void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
+void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
 			   struct perf_counts_values *count)
 {
 	struct perf_counts_values tmp;
@@ -1463,12 +1463,12 @@ void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
 	if (!evsel->prev_raw_counts)
 		return;
 
-	if (cpu == -1) {
+	if (cpu_map_idx == -1) {
 		tmp = evsel->prev_raw_counts->aggr;
 		evsel->prev_raw_counts->aggr = *count;
 	} else {
-		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
-		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
+		tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
+		*perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
 	}
 
 	count->val = count->val - tmp.val;
@@ -1483,20 +1483,21 @@ static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
 	return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
 }
 
-static void evsel__set_count(struct evsel *counter, int cpu, int thread, u64 val, u64 ena, u64 run)
+static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
+			     u64 val, u64 ena, u64 run)
 {
 	struct perf_counts_values *count;
 
-	count = perf_counts(counter->counts, cpu, thread);
+	count = perf_counts(counter->counts, cpu_map_idx, thread);
 
 	count->val = val;
 	count->ena = ena;
 	count->run = run;
 
-	perf_counts__set_loaded(counter->counts, cpu, thread, true);
+	perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
 }
 
-static int evsel__process_group_data(struct evsel *leader, int cpu, int thread, u64 *data)
+static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
 {
 	u64 read_format = leader->core.attr.read_format;
 	struct sample_read_value *v;
@@ -1515,7 +1516,7 @@ static int evsel__process_group_data(struct evsel *leader, int cpu, int thread,
 
 	v = (struct sample_read_value *) data;
 
-	evsel__set_count(leader, cpu, thread, v[0].value, ena, run);
+	evsel__set_count(leader, cpu_map_idx, thread, v[0].value, ena, run);
 
 	for (i = 1; i < nr; i++) {
 		struct evsel *counter;
@@ -1524,7 +1525,7 @@ static int evsel__process_group_data(struct evsel *leader, int cpu, int thread,
 		if (!counter)
 			return -EINVAL;
 
-		evsel__set_count(counter, cpu, thread, v[i].value, ena, run);
+		evsel__set_count(counter, cpu_map_idx, thread, v[i].value, ena, run);
 	}
 
 	return 0;
@@ -1643,16 +1644,16 @@ static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int
 }
 
 static int update_fds(struct evsel *evsel,
-		      int nr_cpus, int cpu_idx,
+		      int nr_cpus, int cpu_map_idx,
 		      int nr_threads, int thread_idx)
 {
 	struct evsel *pos;
 
-	if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
+	if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)
 		return -EINVAL;
 
 	evlist__for_each_entry(evsel->evlist, pos) {
-		nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
+		nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx;
 
 		evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
 
@@ -1667,7 +1668,7 @@ static int update_fds(struct evsel *evsel,
 }
 
 static bool evsel__ignore_missing_thread(struct evsel *evsel,
-					 int nr_cpus, int cpu,
+					 int nr_cpus, int cpu_map_idx,
 					 struct perf_thread_map *threads,
 					 int thread, int err)
 {
@@ -1692,7 +1693,7 @@ static bool evsel__ignore_missing_thread(struct evsel *evsel,
 	 * We should remove fd for missing_thread first
 	 * because thread_map__remove() will decrease threads->nr.
 	 */
-	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
+	if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
 		return false;
 
 	if (thread_map__remove(threads, thread))
@@ -1974,9 +1975,9 @@ bool evsel__increase_rlimit(enum rlimit_action *set_rlimit)
 
 static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 		struct perf_thread_map *threads,
-		int start_cpu, int end_cpu)
+		int start_cpu_map_idx, int end_cpu_map_idx)
 {
-	int cpu, thread, nthreads;
+	int idx, thread, nthreads;
 	int pid = -1, err, old_errno;
 	enum rlimit_action set_rlimit = NO_CHANGE;
 
@@ -2003,7 +2004,7 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 
 	display_attr(&evsel->core.attr);
 
-	for (cpu = start_cpu; cpu < end_cpu; cpu++) {
+	for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
 
 		for (thread = 0; thread < nthreads; thread++) {
 			int fd, group_fd;
@@ -2014,17 +2015,17 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 			if (!evsel->cgrp && !evsel->core.system_wide)
 				pid = perf_thread_map__pid(threads, thread);
 
-			group_fd = get_group_fd(evsel, cpu, thread);
+			group_fd = get_group_fd(evsel, idx, thread);
 
 			test_attr__ready();
 
 			pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
-				pid, cpus->map[cpu], group_fd, evsel->open_flags);
+				pid, cpus->map[idx], group_fd, evsel->open_flags);
 
-			fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[cpu],
+			fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[idx],
						group_fd, evsel->open_flags);
 
-			FD(evsel, cpu, thread) = fd;
+			FD(evsel, idx, thread) = fd;
 
 			if (fd < 0) {
 				err = -errno;
@@ -2034,10 +2035,10 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 				goto try_fallback;
 			}
 
-			bpf_counter__install_pe(evsel, cpu, fd);
+			bpf_counter__install_pe(evsel, idx, fd);
 
 			if (unlikely(test_attr__enabled)) {
-				test_attr__open(&evsel->core.attr, pid, cpus->map[cpu],
+				test_attr__open(&evsel->core.attr, pid, cpus->map[idx],
						fd, group_fd, evsel->open_flags);
 			}
 
@@ -2078,7 +2079,7 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 	if (evsel__precise_ip_fallback(evsel))
 		goto retry_open;
 
-	if (evsel__ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
+	if (evsel__ignore_missing_thread(evsel, cpus->nr, idx, threads, thread, err)) {
 		/* We just removed 1 thread, so lower the upper nthreads limit. */
 		nthreads--;
 
@@ -2093,7 +2094,7 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 	if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit))
 		goto retry_open;
 
-	if (err != -EINVAL || cpu > 0 || thread > 0)
+	if (err != -EINVAL || idx > 0 || thread > 0)
 		goto out_close;
 
 	if (evsel__detect_missing_features(evsel))
@@ -2105,12 +2106,12 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 	old_errno = errno;
 	do {
 		while (--thread >= 0) {
-			if (FD(evsel, cpu, thread) >= 0)
-				close(FD(evsel, cpu, thread));
-			FD(evsel, cpu, thread) = -1;
+			if (FD(evsel, idx, thread) >= 0)
+				close(FD(evsel, idx, thread));
+			FD(evsel, idx, thread) = -1;
 		}
 		thread = nthreads;
-	} while (--cpu >= 0);
+	} while (--idx >= 0);
 	errno = old_errno;
 	return err;
 }
@@ -2127,13 +2128,13 @@ void evsel__close(struct evsel *evsel)
 	perf_evsel__free_id(&evsel->core);
 }
 
-int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu)
+int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
 {
-	if (cpu == -1)
+	if (cpu_map_idx == -1)
 		return evsel__open_cpu(evsel, cpus, NULL, 0,
 					cpus ? cpus->nr : 1);
 
-	return evsel__open_cpu(evsel, cpus, NULL, cpu, cpu + 1);
+	return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
 }
 
 int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
@@ -2958,15 +2959,15 @@ struct perf_env *evsel__env(struct evsel *evsel)
 
 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
 {
-	int cpu, thread;
+	int cpu_map_idx, thread;
 
-	for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) {
+	for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
 		for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
 		     thread++) {
-			int fd = FD(evsel, cpu, thread);
+			int fd = FD(evsel, cpu_map_idx, thread);
 
 			if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
-						   cpu, thread, fd) < 0)
+						   cpu_map_idx, thread, fd) < 0)
 				return -1;
 		}
 	}
...
@@ -284,12 +284,12 @@ void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr);
 int evsel__set_filter(struct evsel *evsel, const char *filter);
 int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
 int evsel__append_addr_filter(struct evsel *evsel, const char *filter);
-int evsel__enable_cpu(struct evsel *evsel, int cpu);
+int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx);
 int evsel__enable(struct evsel *evsel);
 int evsel__disable(struct evsel *evsel);
-int evsel__disable_cpu(struct evsel *evsel, int cpu);
+int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx);
 
-int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu);
+int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx);
 int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads);
 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
 		struct perf_thread_map *threads);
...
@@ -531,7 +531,7 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
 int create_perf_stat_counter(struct evsel *evsel,
 			     struct perf_stat_config *config,
 			     struct target *target,
-			     int cpu)
+			     int cpu_map_idx)
 {
 	struct perf_event_attr *attr = &evsel->core.attr;
 	struct evsel *leader = evsel__leader(evsel);
@@ -585,7 +585,7 @@ int create_perf_stat_counter(struct evsel *evsel,
 	}
 
 	if (target__has_cpu(target) && !target__has_per_thread(target))
-		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu);
+		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);
 
 	return evsel__open_per_thread(evsel, evsel->core.threads);
 }
@@ -248,7 +248,7 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
 int create_perf_stat_counter(struct evsel *evsel,
 			     struct perf_stat_config *config,
 			     struct target *target,
-			     int cpu);
+			     int cpu_map_idx);
 
 void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config,
 			    struct target *_target, struct timespec *ts, int argc, const char **argv);
...