Commit d7e3c397 authored by Zhengjun Xing, committed by Arnaldo Carvalho de Melo

perf stat: Support hybrid --topdown option

cpu_core and cpu_atom have different topdown event groups.

For cpu_core, --topdown is equivalent to:

"{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,
  cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/,
  cpu_core/topdown-heavy-ops/,cpu_core/topdown-br-mispredict/,
  cpu_core/topdown-fetch-lat/,cpu_core/topdown-mem-bound/}"

For cpu_atom, --topdown is equivalent to:

"{cpu_atom/topdown-retiring/,cpu_atom/topdown-bad-spec/,
 cpu_atom/topdown-fe-bound/,cpu_atom/topdown-be-bound/}"

To simplify the implementation, on hybrid platforms --topdown is used
together with --cputype. If --cputype is not specified, cpu_core topdown
events are used by default.

  # ./perf stat --topdown -a  sleep 1
  WARNING: default to use cpu_core topdown events

   Performance counter stats for 'system wide':

              retiring      bad speculation       frontend bound        backend bound     heavy operations     light operations    branch mispredict       machine clears        fetch latency      fetch bandwidth         memory bound           Core bound
                  4.1%                 0.0%                 5.1%                90.8%                 2.3%                 1.8%                 0.0%                 0.0%                 4.2%                 0.9%                 9.9%                81.0%

         1.002624229 seconds time elapsed

  # ./perf stat --topdown -a --cputype atom  sleep 1

   Performance counter stats for 'system wide':

              retiring      bad speculation       frontend bound        backend bound
                 13.5%                 0.1%                31.2%                55.2%

         1.002366987 seconds time elapsed
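
For reference, the cpu_atom run above should be reproducible by spelling out
the group explicitly; this is a sketch of the command line only, based on the
equivalence quoted earlier, not captured output:

  # ./perf stat -e "{cpu_atom/topdown-retiring/,cpu_atom/topdown-bad-spec/,cpu_atom/topdown-fe-bound/,cpu_atom/topdown-be-bound/}" -a sleep 1
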
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220422065635.767648-3-zhengjun.xing@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
@@ -1867,11 +1867,23 @@ static int add_default_attributes(void)
 		unsigned int max_level = 1;
 		char *str = NULL;
 		bool warn = false;
+		const char *pmu_name = "cpu";
 		if (!force_metric_only)
 			stat_config.metric_only = true;
-		if (pmu_have_event("cpu", topdown_metric_L2_attrs[5])) {
+		if (perf_pmu__has_hybrid()) {
+			if (!evsel_list->hybrid_pmu_name) {
+				pr_warning("WARNING: default to use cpu_core topdown events\n");
+				evsel_list->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu("core");
+			}
+			pmu_name = evsel_list->hybrid_pmu_name;
+			if (!pmu_name)
+				return -1;
+		}
+		if (pmu_have_event(pmu_name, topdown_metric_L2_attrs[5])) {
 			metric_attrs = topdown_metric_L2_attrs;
 			max_level = 2;
 		}
@@ -1882,10 +1894,11 @@ static int add_default_attributes(void)
 		} else if (!stat_config.topdown_level)
 			stat_config.topdown_level = max_level;
-		if (topdown_filter_events(metric_attrs, &str, 1) < 0) {
+		if (topdown_filter_events(metric_attrs, &str, 1, pmu_name) < 0) {
 			pr_err("Out of memory\n");
 			return -1;
 		}
 		if (metric_attrs[0] && str) {
 			if (!stat_config.interval && !stat_config.metric_only) {
 				fprintf(stat_config.output,
@@ -1909,10 +1922,12 @@ static int add_default_attributes(void)
 		}
 		if (topdown_filter_events(topdown_attrs, &str,
-					  arch_topdown_check_group(&warn)) < 0) {
+					  arch_topdown_check_group(&warn),
+					  pmu_name) < 0) {
 			pr_err("Out of memory\n");
 			return -1;
 		}
 		if (topdown_attrs[0] && str) {
 			struct parse_events_error errinfo;
 			if (warn)
......
@@ -116,7 +116,9 @@ static void perf_stat_evsel_id_init(struct evsel *evsel)
 	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
 	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
-		if (!strcmp(evsel__name(evsel), id_str[i])) {
+		if (!strcmp(evsel__name(evsel), id_str[i]) ||
+		    (strstr(evsel__name(evsel), id_str[i]) && evsel->pmu_name
+		     && strstr(evsel__name(evsel), evsel->pmu_name))) {
 			ps->id = i;
 			break;
 		}
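
The relaxed match above is needed because on hybrid systems the evsel name
carries the PMU prefix, e.g. "cpu_core/topdown-retiring/", which no longer
compares equal to the bare id_str[] entry "topdown-retiring". Below is a
minimal standalone sketch of the matching rule, using a hypothetical matches()
helper and sample strings rather than the perf code itself:

#include <stdio.h>
#include <string.h>

/* Sketch of the relaxed lookup: exact name match, or id substring plus
 * PMU-name substring for PMU-qualified hybrid event names. */
static int matches(const char *evsel_name, const char *id, const char *pmu_name)
{
	return !strcmp(evsel_name, id) ||
	       (strstr(evsel_name, id) && pmu_name && strstr(evsel_name, pmu_name));
}

int main(void)
{
	const char *id = "topdown-retiring";	/* stand-in for one id_str[] entry */

	printf("%d\n", matches("topdown-retiring", id, NULL));			/* 1: exact match */
	printf("%d\n", matches("cpu_core/topdown-retiring/", id, "cpu_core"));	/* 1: hybrid name */
	printf("%d\n", matches("cpu_core/topdown-bad-spec/", id, "cpu_core"));	/* 0: different event */
	return 0;
}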
......
 // SPDX-License-Identifier: GPL-2.0
 #include <stdio.h>
 #include "pmu.h"
+#include "pmu-hybrid.h"
 #include "topdown.h"
-int topdown_filter_events(const char **attr, char **str, bool use_group)
+int topdown_filter_events(const char **attr, char **str, bool use_group,
+			  const char *pmu_name)
 {
 	int off = 0;
 	int i;
 	int len = 0;
 	char *s;
+	bool is_hybrid = perf_pmu__is_hybrid(pmu_name);
 	for (i = 0; attr[i]; i++) {
-		if (pmu_have_event("cpu", attr[i])) {
-			len += strlen(attr[i]) + 1;
+		if (pmu_have_event(pmu_name, attr[i])) {
+			if (is_hybrid)
+				len += strlen(attr[i]) + strlen(pmu_name) + 3;
+			else
+				len += strlen(attr[i]) + 1;
 			attr[i - off] = attr[i];
 		} else
 			off++;
@@ -30,7 +36,10 @@ int topdown_filter_events(const char **attr, char **str, bool use_group)
 	if (use_group)
 		*s++ = '{';
 	for (i = 0; attr[i]; i++) {
-		strcpy(s, attr[i]);
+		if (!is_hybrid)
+			strcpy(s, attr[i]);
+		else
+			sprintf(s, "%s/%s/", pmu_name, attr[i]);
 		s += strlen(s);
 		*s++ = ',';
 	}
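
On hybrid, each surviving attribute is emitted as "<pmu>/<event>/", so the
per-event length grows by strlen(pmu_name) plus 3 (the two slashes and the
trailing comma). Below is a standalone sketch that builds the resulting group
string for the cpu_atom events listed in the commit message; the buffer sizing
and termination are simplified illustrations, not the exact perf logic:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* The cpu_atom topdown events listed in the commit message. */
	const char *pmu_name = "cpu_atom";
	const char *attr[] = { "topdown-retiring", "topdown-bad-spec",
			       "topdown-fe-bound", "topdown-be-bound", NULL };
	int i, len = 0;
	char *str, *s;

	/* Mirror the hybrid length accounting: event + PMU name + two '/' + ','. */
	for (i = 0; attr[i]; i++)
		len += strlen(attr[i]) + strlen(pmu_name) + 3;

	s = str = malloc(len + 2);	/* room for leading '{' and final '\0' (sketch sizing) */
	if (!str)
		return 1;

	*s++ = '{';
	for (i = 0; attr[i]; i++) {
		sprintf(s, "%s/%s/", pmu_name, attr[i]);	/* same format as above */
		s += strlen(s);
		*s++ = ',';
	}
	s[-1] = '}';	/* close the group by replacing the trailing ',' */
	*s = '\0';

	/* Prints: {cpu_atom/topdown-retiring/,...,cpu_atom/topdown-be-bound/} */
	printf("%s\n", str);
	free(str);
	return 0;
}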
......
@@ -7,6 +7,7 @@ bool arch_topdown_check_group(bool *warn);
 void arch_topdown_group_warn(void);
 bool arch_topdown_sample_read(struct evsel *leader);
-int topdown_filter_events(const char **attr, char **str, bool use_group);
+int topdown_filter_events(const char **attr, char **str, bool use_group,
+			  const char *pmu_name);
 #endif