Commit a8af6e48 authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf test: Roundtrip name, don't assume 1 event per name

Opening hardware events and legacy cache events by name on a hybrid machine
opens them on each PMU. The old parse-then-check-index logic then fails,
because the parsed index is double the expected value. Avoid checking the
index altogether by comparing names immediately after the parse.

This change removes the hard-coded hybrid logic and the assumption about how
many events a single name expands into. On hybrid systems a PMU may or may
not support a given event, so a fixed "distance" between names isn't a
consistent solution.
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ahmad Yasin <ahmad.yasin@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Caleb Biggers <caleb.biggers@intel.com>
Cc: Edward Baker <edward.baker@intel.com>
Cc: Florian Fischer <florian.fischer@muhq.space>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Perry Taylor <perry.taylor@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Samantha Alt <samantha.alt@intel.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Sumanth Korikkar <sumanthk@linux.ibm.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Tiezhu Yang <yangtiezhu@loongson.cn>
Cc: Weilin Wang <weilin.wang@intel.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: Yang Jihong <yangjihong1@huawei.com>
Link: https://lore.kernel.org/r/20230502223851.2234828-15-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 4a7c4eaf
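
For background: the old test parsed every name into one shared evlist and then assumed the evsel at index idx came from names[idx / distance], which only holds if every name expands to exactly "distance" events. The standalone sketch below (plain C, no perf internals; the event names and per-name expansion counts are invented for illustration) shows how a variable expansion count breaks that index arithmetic, and how checking each name only against the evsels it produced, the pattern adopted in the diff below, avoids it.

/*
 * Toy model of the assumption the old test made and the per-name check
 * the patch switches to. Plain C, no perf internals: the event names and
 * expansion counts are invented to mimic a hybrid machine where not every
 * PMU supports every event.
 */
#include <stdio.h>
#include <string.h>

static const char *const names[] = { "cycles", "instructions", "branches" };
/* Hypothetical number of evsels each name expands to after parsing. */
static const int nexpand[] = { 2, 1, 2 };

int main(void)
{
        const char *parsed[8];
        int nparsed = 0;

        /* Flatten the expansion the way one shared evlist would see it. */
        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                for (int j = 0; j < nexpand[i]; j++)
                        parsed[nparsed++] = names[i];

        /* Old approach: evsel at idx is assumed to come from names[idx / distance]. */
        const int distance = 2;
        for (int idx = 0; idx < nparsed; idx++) {
                const char *expected = names[idx / distance];

                printf("idx %d: parsed '%s', old check expects '%s'%s\n",
                       idx, parsed[idx], expected,
                       strcmp(parsed[idx], expected) ? "  <-- spurious mismatch" : "");
        }

        /* New approach: compare each name only against the evsels it produced. */
        int idx = 0;
        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                for (int j = 0; j < nexpand[i]; j++, idx++)
                        if (strcmp(parsed[idx], names[i]))
                                printf("roundtrip failed for '%s'\n", names[i]);

        return 0;
}

Built with any C99 compiler, the first loop reports a mismatch at index 3 even though every name parsed correctly, which mirrors the spurious failure the commit message describes; the second loop needs no index arithmetic at all.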
@@ -4,114 +4,93 @@
 #include "parse-events.h"
 #include "tests.h"
 #include "debug.h"
-#include "pmu.h"
-#include "pmu-hybrid.h"
-#include <errno.h>
 #include <linux/kernel.h>
 
 static int perf_evsel__roundtrip_cache_name_test(void)
 {
-        char name[128];
-        int type, op, err = 0, ret = 0, i, idx;
-        struct evsel *evsel;
-        struct evlist *evlist = evlist__new();
-
-        if (evlist == NULL)
-                return -ENOMEM;
-
-        for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
-                for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+        int ret = TEST_OK;
+
+        for (int type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+                for (int op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
                         /* skip invalid cache type */
                         if (!evsel__is_cache_op_valid(type, op))
                                 continue;
 
-                        for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
-                                __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
-                                err = parse_event(evlist, name);
-                                if (err)
-                                        ret = err;
-                        }
-                }
-        }
-
-        idx = 0;
-        evsel = evlist__first(evlist);
-
-        for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
-                for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
-                        /* skip invalid cache type */
-                        if (!evsel__is_cache_op_valid(type, op))
-                                continue;
-
-                        for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
-                                __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
-                                if (evsel->core.idx != idx)
-                                        continue;
-
-                                ++idx;
-                                if (strcmp(evsel__name(evsel), name)) {
-                                        pr_debug("%s != %s\n", evsel__name(evsel), name);
-                                        ret = -1;
-                                }
-
-                                evsel = evsel__next(evsel);
+                        for (int res = 0; res < PERF_COUNT_HW_CACHE_RESULT_MAX; res++) {
+                                char name[128];
+                                struct evlist *evlist = evlist__new();
+                                struct evsel *evsel;
+                                int err;
+
+                                if (evlist == NULL) {
+                                        pr_debug("Failed to alloc evlist");
+                                        return TEST_FAIL;
+                                }
+                                __evsel__hw_cache_type_op_res_name(type, op, res,
+                                                                   name, sizeof(name));
+
+                                err = parse_event(evlist, name);
+                                if (err) {
+                                        pr_debug("Failure to parse cache event '%s' possibly as PMUs don't support it",
+                                                 name);
+                                        evlist__delete(evlist);
+                                        continue;
+                                }
+                                evlist__for_each_entry(evlist, evsel) {
+                                        if (strcmp(evsel__name(evsel), name)) {
+                                                pr_debug("%s != %s\n", evsel__name(evsel), name);
+                                                ret = TEST_FAIL;
+                                        }
+                                }
+                                evlist__delete(evlist);
                         }
                 }
         }
-        evlist__delete(evlist);
         return ret;
 }
 
-static int __perf_evsel__name_array_test(const char *const names[], int nr_names,
-                                         int distance)
+static int perf_evsel__name_array_test(const char *const names[], int nr_names)
 {
-        int i, err;
-        struct evsel *evsel;
-        struct evlist *evlist = evlist__new();
+        int ret = TEST_OK;
 
-        if (evlist == NULL)
-                return -ENOMEM;
+        for (int i = 0; i < nr_names; ++i) {
+                struct evlist *evlist = evlist__new();
+                struct evsel *evsel;
+                int err;
 
-        for (i = 0; i < nr_names; ++i) {
+                if (evlist == NULL) {
+                        pr_debug("Failed to alloc evlist");
+                        return TEST_FAIL;
+                }
                 err = parse_event(evlist, names[i]);
                 if (err) {
                         pr_debug("failed to parse event '%s', err %d\n",
                                  names[i], err);
-                        goto out_delete_evlist;
+                        evlist__delete(evlist);
+                        ret = TEST_FAIL;
+                        continue;
                 }
-        }
-
-        err = 0;
-        evlist__for_each_entry(evlist, evsel) {
-                if (strcmp(evsel__name(evsel), names[evsel->core.idx / distance])) {
-                        --err;
-                        pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->core.idx / distance]);
+                evlist__for_each_entry(evlist, evsel) {
+                        if (strcmp(evsel__name(evsel), names[i])) {
+                                pr_debug("%s != %s\n", evsel__name(evsel), names[i]);
+                                ret = TEST_FAIL;
+                        }
                 }
-        }
-
-out_delete_evlist:
-        evlist__delete(evlist);
-        return err;
+                evlist__delete(evlist);
+        }
+        return ret;
 }
 
-#define perf_evsel__name_array_test(names, distance) \
-        __perf_evsel__name_array_test(names, ARRAY_SIZE(names), distance)
-
 static int test__perf_evsel__roundtrip_name_test(struct test_suite *test __maybe_unused,
                                                  int subtest __maybe_unused)
 {
-        int err = 0, ret = 0;
-
-        if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom"))
-                return perf_evsel__name_array_test(evsel__hw_names, 2);
+        int err = 0, ret = TEST_OK;
 
-        err = perf_evsel__name_array_test(evsel__hw_names, 1);
+        err = perf_evsel__name_array_test(evsel__hw_names, PERF_COUNT_HW_MAX);
         if (err)
                 ret = err;
 
-        err = __perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1, 1);
+        err = perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1);
         if (err)
                 ret = err;
...