Commit 9b7c8547 authored by Ingo Molnar's avatar Ingo Molnar

Merge tag 'perf-core-for-mingo-4.15-20171023' of...

Merge tag 'perf-core-for-mingo-4.15-20171023' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

 - Update vendor events JSON metrics for Intel's Broadwell, Broadwell
   Server, Haswell, Haswell Server, IvyBridge, IvyTown, JakeTown, Sandy
   Bridge, Skylake and Skylake Server (Andi Kleen)

 - Add vendor event file for Intel's Goldmont Plus V1 (Kan Liang)

 - Move perf_mmap methods from 'perf record' and evlist.c to a separate
   mmap.[ch] pair, to better separate things and pave the way for further
   work on multithreading tools (Arnaldo Carvalho de Melo)

 - Do not check ABI headers in a detached tarball build, as the kernel
   headers from which we copied tools/include/ are by definition not
   available (Arnaldo Carvalho de Melo)

 - Make 'perf script' use fprintf() like printing, i.e. receiving a FILE
   pointer so that it gets consistent with other tools/ code and allows
   for printing to per-event files (Arnaldo Carvalho de Melo)

 - Error handling fixes (resource release on exit) for 'perf script'
   and 'perf kmem' (Christophe JAILLET)

 - Make some 'perf event attr' tests optional on virtual machines, where
   tested counters are not available (Jiri Olsa)
Signed-off-by: default avatarArnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parents 8776fe75 65db92e0
...@@ -204,7 +204,7 @@ For example Intel Core CPUs typically have four generic performance counters ...@@ -204,7 +204,7 @@ For example Intel Core CPUs typically have four generic performance counters
for the core, plus three fixed counters for instructions, cycles and for the core, plus three fixed counters for instructions, cycles and
ref-cycles. Some special events have restrictions on which counter they ref-cycles. Some special events have restrictions on which counter they
can schedule, and may not support multiple instances in a single group. can schedule, and may not support multiple instances in a single group.
When too many events are specified in the group none of them will not When too many events are specified in the group some of them will not
be measured. be measured.
Globally pinned events can limit the number of counters available for Globally pinned events can limit the number of counters available for
......
#include <linux/compiler.h>
#include <sys/types.h> #include <sys/types.h>
#include <regex.h> #include <regex.h>
...@@ -23,7 +24,7 @@ static struct ins_ops *arm__associate_instruction_ops(struct arch *arch, const c ...@@ -23,7 +24,7 @@ static struct ins_ops *arm__associate_instruction_ops(struct arch *arch, const c
return ops; return ops;
} }
static int arm__annotate_init(struct arch *arch) static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{ {
struct arm_annotate *arm; struct arm_annotate *arm;
int err; int err;
......
#include <linux/compiler.h>
#include <sys/types.h> #include <sys/types.h>
#include <regex.h> #include <regex.h>
...@@ -25,7 +26,7 @@ static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const ...@@ -25,7 +26,7 @@ static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const
return ops; return ops;
} }
static int arm64__annotate_init(struct arch *arch) static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{ {
struct arm64_annotate *arm; struct arm64_annotate *arm;
int err; int err;
......
#include <linux/compiler.h>
static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, const char *name) static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, const char *name)
{ {
int i; int i;
...@@ -46,7 +48,7 @@ static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, con ...@@ -46,7 +48,7 @@ static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, con
return ops; return ops;
} }
static int powerpc__annotate_init(struct arch *arch) static int powerpc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{ {
if (!arch->initialized) { if (!arch->initialized) {
arch->initialized = true; arch->initialized = true;
......
#include <linux/compiler.h>
static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *name) static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *name)
{ {
struct ins_ops *ops = NULL; struct ins_ops *ops = NULL;
...@@ -19,7 +21,7 @@ static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *na ...@@ -19,7 +21,7 @@ static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *na
return ops; return ops;
} }
static int s390__annotate_init(struct arch *arch) static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{ {
if (!arch->initialized) { if (!arch->initialized) {
arch->initialized = true; arch->initialized = true;
......
...@@ -122,3 +122,17 @@ static int x86__cpuid_parse(struct arch *arch, char *cpuid) ...@@ -122,3 +122,17 @@ static int x86__cpuid_parse(struct arch *arch, char *cpuid)
return -1; return -1;
} }
/*
 * One-time x86 annotation setup: parse the cpuid string (when one is
 * supplied) to configure arch-specific instruction handling, then mark
 * the arch as initialized so repeat calls are no-ops.
 */
static int x86__annotate_init(struct arch *arch, char *cpuid)
{
	int ret = 0;

	if (!arch->initialized) {
		if (cpuid)
			ret = x86__cpuid_parse(arch, cpuid);
		arch->initialized = true;
	}

	return ret;
}
...@@ -1983,7 +1983,8 @@ int cmd_kmem(int argc, const char **argv) ...@@ -1983,7 +1983,8 @@ int cmd_kmem(int argc, const char **argv)
if (perf_time__parse_str(&ptime, time_str) != 0) { if (perf_time__parse_str(&ptime, time_str) != 0) {
pr_err("Invalid time string\n"); pr_err("Invalid time string\n");
return -EINVAL; ret = -EINVAL;
goto out_delete;
} }
if (!strcmp(argv[0], "stat")) { if (!strcmp(argv[0], "stat")) {
......
...@@ -129,107 +129,12 @@ static int process_synthesized_event(struct perf_tool *tool, ...@@ -129,107 +129,12 @@ static int process_synthesized_event(struct perf_tool *tool,
return record__write(rec, event, event->header.size); return record__write(rec, event, event->header.size);
} }
static int static int record__pushfn(void *to, void *bf, size_t size)
backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
struct perf_event_header *pheader;
u64 evt_head = head;
int size = mask + 1;
pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
pheader = (struct perf_event_header *)(buf + (head & mask));
*start = head;
while (true) {
if (evt_head - head >= (unsigned int)size) {
pr_debug("Finished reading backward ring buffer: rewind\n");
if (evt_head - head > (unsigned int)size)
evt_head -= pheader->size;
*end = evt_head;
return 0;
}
pheader = (struct perf_event_header *)(buf + (evt_head & mask));
if (pheader->size == 0) {
pr_debug("Finished reading backward ring buffer: get start\n");
*end = evt_head;
return 0;
}
evt_head += pheader->size;
pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
}
WARN_ONCE(1, "Shouldn't get here\n");
return -1;
}
static int
rb_find_range(void *data, int mask, u64 head, u64 old,
u64 *start, u64 *end, bool backward)
{ {
if (!backward) { struct record *rec = to;
*start = old;
*end = head;
return 0;
}
return backward_rb_find_range(data, mask, head, start, end);
}
static int
record__mmap_read(struct record *rec, struct perf_mmap *md,
bool overwrite, bool backward)
{
u64 head = perf_mmap__read_head(md);
u64 old = md->prev;
u64 end = head, start = old;
unsigned char *data = md->base + page_size;
unsigned long size;
void *buf;
int rc = 0;
if (rb_find_range(data, md->mask, head,
old, &start, &end, backward))
return -1;
if (start == end)
return 0;
rec->samples++; rec->samples++;
return record__write(rec, bf, size);
size = end - start;
if (size > (unsigned long)(md->mask) + 1) {
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
md->prev = head;
perf_mmap__consume(md, overwrite || backward);
return 0;
}
if ((start & md->mask) + size != (end & md->mask)) {
buf = &data[start & md->mask];
size = md->mask + 1 - (start & md->mask);
start += size;
if (record__write(rec, buf, size) < 0) {
rc = -1;
goto out;
}
}
buf = &data[start & md->mask];
size = end - start;
start += size;
if (record__write(rec, buf, size) < 0) {
rc = -1;
goto out;
}
md->prev = head;
perf_mmap__consume(md, overwrite || backward);
out:
return rc;
} }
static volatile int done; static volatile int done;
...@@ -576,8 +481,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli ...@@ -576,8 +481,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap; struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
if (maps[i].base) { if (maps[i].base) {
if (record__mmap_read(rec, &maps[i], if (perf_mmap__push(&maps[i], evlist->overwrite, backward, rec, record__pushfn) != 0) {
evlist->overwrite, backward) != 0) {
rc = -1; rc = -1;
goto out; goto out;
} }
......
...@@ -500,69 +500,76 @@ static int perf_session__check_output_opt(struct perf_session *session) ...@@ -500,69 +500,76 @@ static int perf_session__check_output_opt(struct perf_session *session)
return 0; return 0;
} }
static void print_sample_iregs(struct perf_sample *sample, static int perf_sample__fprintf_iregs(struct perf_sample *sample,
struct perf_event_attr *attr) struct perf_event_attr *attr, FILE *fp)
{ {
struct regs_dump *regs = &sample->intr_regs; struct regs_dump *regs = &sample->intr_regs;
uint64_t mask = attr->sample_regs_intr; uint64_t mask = attr->sample_regs_intr;
unsigned i = 0, r; unsigned i = 0, r;
int printed = 0;
if (!regs) if (!regs)
return; return 0;
for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) { for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs->regs[i++]; u64 val = regs->regs[i++];
printf("%5s:0x%"PRIx64" ", perf_reg_name(r), val); printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
} }
return printed;
} }
static void print_sample_uregs(struct perf_sample *sample, static int perf_sample__fprintf_uregs(struct perf_sample *sample,
struct perf_event_attr *attr) struct perf_event_attr *attr, FILE *fp)
{ {
struct regs_dump *regs = &sample->user_regs; struct regs_dump *regs = &sample->user_regs;
uint64_t mask = attr->sample_regs_user; uint64_t mask = attr->sample_regs_user;
unsigned i = 0, r; unsigned i = 0, r;
int printed = 0;
if (!regs || !regs->regs) if (!regs || !regs->regs)
return; return 0;
printf(" ABI:%" PRIu64 " ", regs->abi); printed += fprintf(fp, " ABI:%" PRIu64 " ", regs->abi);
for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) { for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs->regs[i++]; u64 val = regs->regs[i++];
printf("%5s:0x%"PRIx64" ", perf_reg_name(r), val); printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
} }
return printed;
} }
static void print_sample_start(struct perf_sample *sample, static int perf_sample__fprintf_start(struct perf_sample *sample,
struct thread *thread, struct thread *thread,
struct perf_evsel *evsel) struct perf_evsel *evsel, FILE *fp)
{ {
struct perf_event_attr *attr = &evsel->attr; struct perf_event_attr *attr = &evsel->attr;
unsigned long secs; unsigned long secs;
unsigned long long nsecs; unsigned long long nsecs;
int printed = 0;
if (PRINT_FIELD(COMM)) { if (PRINT_FIELD(COMM)) {
if (latency_format) if (latency_format)
printf("%8.8s ", thread__comm_str(thread)); printed += fprintf(fp, "%8.8s ", thread__comm_str(thread));
else if (PRINT_FIELD(IP) && symbol_conf.use_callchain) else if (PRINT_FIELD(IP) && symbol_conf.use_callchain)
printf("%s ", thread__comm_str(thread)); printed += fprintf(fp, "%s ", thread__comm_str(thread));
else else
printf("%16s ", thread__comm_str(thread)); printed += fprintf(fp, "%16s ", thread__comm_str(thread));
} }
if (PRINT_FIELD(PID) && PRINT_FIELD(TID)) if (PRINT_FIELD(PID) && PRINT_FIELD(TID))
printf("%5d/%-5d ", sample->pid, sample->tid); printed += fprintf(fp, "%5d/%-5d ", sample->pid, sample->tid);
else if (PRINT_FIELD(PID)) else if (PRINT_FIELD(PID))
printf("%5d ", sample->pid); printed += fprintf(fp, "%5d ", sample->pid);
else if (PRINT_FIELD(TID)) else if (PRINT_FIELD(TID))
printf("%5d ", sample->tid); printed += fprintf(fp, "%5d ", sample->tid);
if (PRINT_FIELD(CPU)) { if (PRINT_FIELD(CPU)) {
if (latency_format) if (latency_format)
printf("%3d ", sample->cpu); printed += fprintf(fp, "%3d ", sample->cpu);
else else
printf("[%03d] ", sample->cpu); printed += fprintf(fp, "[%03d] ", sample->cpu);
} }
if (PRINT_FIELD(TIME)) { if (PRINT_FIELD(TIME)) {
...@@ -571,13 +578,15 @@ static void print_sample_start(struct perf_sample *sample, ...@@ -571,13 +578,15 @@ static void print_sample_start(struct perf_sample *sample,
nsecs -= secs * NSEC_PER_SEC; nsecs -= secs * NSEC_PER_SEC;
if (nanosecs) if (nanosecs)
printf("%5lu.%09llu: ", secs, nsecs); printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
else { else {
char sample_time[32]; char sample_time[32];
timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time)); timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time));
printf("%12s: ", sample_time); printed += fprintf(fp, "%12s: ", sample_time);
} }
} }
return printed;
} }
static inline char static inline char
...@@ -589,16 +598,17 @@ mispred_str(struct branch_entry *br) ...@@ -589,16 +598,17 @@ mispred_str(struct branch_entry *br)
return br->flags.predicted ? 'P' : 'M'; return br->flags.predicted ? 'P' : 'M';
} }
static void print_sample_brstack(struct perf_sample *sample, static int perf_sample__fprintf_brstack(struct perf_sample *sample,
struct thread *thread, struct thread *thread,
struct perf_event_attr *attr) struct perf_event_attr *attr, FILE *fp)
{ {
struct branch_stack *br = sample->branch_stack; struct branch_stack *br = sample->branch_stack;
struct addr_location alf, alt; struct addr_location alf, alt;
u64 i, from, to; u64 i, from, to;
int printed = 0;
if (!(br && br->nr)) if (!(br && br->nr))
return; return 0;
for (i = 0; i < br->nr; i++) { for (i = 0; i < br->nr; i++) {
from = br->entries[i].from; from = br->entries[i].from;
...@@ -611,38 +621,41 @@ static void print_sample_brstack(struct perf_sample *sample, ...@@ -611,38 +621,41 @@ static void print_sample_brstack(struct perf_sample *sample,
thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
} }
printf(" 0x%"PRIx64, from); printed += fprintf(fp, " 0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) { if (PRINT_FIELD(DSO)) {
printf("("); printed += fprintf(fp, "(");
map__fprintf_dsoname(alf.map, stdout); printed += map__fprintf_dsoname(alf.map, fp);
printf(")"); printed += fprintf(fp, ")");
} }
printf("/0x%"PRIx64, to); printed += fprintf(fp, "/0x%"PRIx64, to);
if (PRINT_FIELD(DSO)) { if (PRINT_FIELD(DSO)) {
printf("("); printed += fprintf(fp, "(");
map__fprintf_dsoname(alt.map, stdout); printed += map__fprintf_dsoname(alt.map, fp);
printf(")"); printed += fprintf(fp, ")");
} }
printf("/%c/%c/%c/%d ", printed += fprintf(fp, "/%c/%c/%c/%d ",
mispred_str( br->entries + i), mispred_str( br->entries + i),
br->entries[i].flags.in_tx? 'X' : '-', br->entries[i].flags.in_tx? 'X' : '-',
br->entries[i].flags.abort? 'A' : '-', br->entries[i].flags.abort? 'A' : '-',
br->entries[i].flags.cycles); br->entries[i].flags.cycles);
} }
return printed;
} }
static void print_sample_brstacksym(struct perf_sample *sample, static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
struct thread *thread, struct thread *thread,
struct perf_event_attr *attr) struct perf_event_attr *attr, FILE *fp)
{ {
struct branch_stack *br = sample->branch_stack; struct branch_stack *br = sample->branch_stack;
struct addr_location alf, alt; struct addr_location alf, alt;
u64 i, from, to; u64 i, from, to;
int printed = 0;
if (!(br && br->nr)) if (!(br && br->nr))
return; return 0;
for (i = 0; i < br->nr; i++) { for (i = 0; i < br->nr; i++) {
...@@ -659,37 +672,40 @@ static void print_sample_brstacksym(struct perf_sample *sample, ...@@ -659,37 +672,40 @@ static void print_sample_brstacksym(struct perf_sample *sample,
if (alt.map) if (alt.map)
alt.sym = map__find_symbol(alt.map, alt.addr); alt.sym = map__find_symbol(alt.map, alt.addr);
symbol__fprintf_symname_offs(alf.sym, &alf, stdout); printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
if (PRINT_FIELD(DSO)) { if (PRINT_FIELD(DSO)) {
printf("("); printed += fprintf(fp, "(");
map__fprintf_dsoname(alf.map, stdout); printed += map__fprintf_dsoname(alf.map, fp);
printf(")"); printed += fprintf(fp, ")");
} }
putchar('/'); printed += fprintf(fp, "%c", '/');
symbol__fprintf_symname_offs(alt.sym, &alt, stdout); printed += symbol__fprintf_symname_offs(alt.sym, &alt, fp);
if (PRINT_FIELD(DSO)) { if (PRINT_FIELD(DSO)) {
printf("("); printed += fprintf(fp, "(");
map__fprintf_dsoname(alt.map, stdout); printed += map__fprintf_dsoname(alt.map, fp);
printf(")"); printed += fprintf(fp, ")");
} }
printf("/%c/%c/%c/%d ", printed += fprintf(fp, "/%c/%c/%c/%d ",
mispred_str( br->entries + i), mispred_str( br->entries + i),
br->entries[i].flags.in_tx? 'X' : '-', br->entries[i].flags.in_tx? 'X' : '-',
br->entries[i].flags.abort? 'A' : '-', br->entries[i].flags.abort? 'A' : '-',
br->entries[i].flags.cycles); br->entries[i].flags.cycles);
} }
return printed;
} }
static void print_sample_brstackoff(struct perf_sample *sample, static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
struct thread *thread, struct thread *thread,
struct perf_event_attr *attr) struct perf_event_attr *attr, FILE *fp)
{ {
struct branch_stack *br = sample->branch_stack; struct branch_stack *br = sample->branch_stack;
struct addr_location alf, alt; struct addr_location alf, alt;
u64 i, from, to; u64 i, from, to;
int printed = 0;
if (!(br && br->nr)) if (!(br && br->nr))
return; return 0;
for (i = 0; i < br->nr; i++) { for (i = 0; i < br->nr; i++) {
...@@ -706,24 +722,26 @@ static void print_sample_brstackoff(struct perf_sample *sample, ...@@ -706,24 +722,26 @@ static void print_sample_brstackoff(struct perf_sample *sample,
if (alt.map && !alt.map->dso->adjust_symbols) if (alt.map && !alt.map->dso->adjust_symbols)
to = map__map_ip(alt.map, to); to = map__map_ip(alt.map, to);
printf(" 0x%"PRIx64, from); printed += fprintf(fp, " 0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) { if (PRINT_FIELD(DSO)) {
printf("("); printed += fprintf(fp, "(");
map__fprintf_dsoname(alf.map, stdout); printed += map__fprintf_dsoname(alf.map, fp);
printf(")"); printed += fprintf(fp, ")");
} }
printf("/0x%"PRIx64, to); printed += fprintf(fp, "/0x%"PRIx64, to);
if (PRINT_FIELD(DSO)) { if (PRINT_FIELD(DSO)) {
printf("("); printed += fprintf(fp, "(");
map__fprintf_dsoname(alt.map, stdout); printed += map__fprintf_dsoname(alt.map, fp);
printf(")"); printed += fprintf(fp, ")");
} }
printf("/%c/%c/%c/%d ", printed += fprintf(fp, "/%c/%c/%c/%d ",
mispred_str(br->entries + i), mispred_str(br->entries + i),
br->entries[i].flags.in_tx ? 'X' : '-', br->entries[i].flags.in_tx ? 'X' : '-',
br->entries[i].flags.abort ? 'A' : '-', br->entries[i].flags.abort ? 'A' : '-',
br->entries[i].flags.cycles); br->entries[i].flags.cycles);
} }
return printed;
} }
#define MAXBB 16384UL #define MAXBB 16384UL
...@@ -789,31 +807,30 @@ static int grab_bb(u8 *buffer, u64 start, u64 end, ...@@ -789,31 +807,30 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
return len; return len;
} }
static void print_jump(uint64_t ip, struct branch_entry *en, static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
struct perf_insn *x, u8 *inbuf, int len, struct perf_insn *x, u8 *inbuf, int len,
int insn) int insn, FILE *fp)
{ {
printf("\t%016" PRIx64 "\t%-30s\t#%s%s%s%s", int printed = fprintf(fp, "\t%016" PRIx64 "\t%-30s\t#%s%s%s%s", ip,
ip, dump_insn(x, ip, inbuf, len, NULL),
dump_insn(x, ip, inbuf, len, NULL), en->flags.predicted ? " PRED" : "",
en->flags.predicted ? " PRED" : "", en->flags.mispred ? " MISPRED" : "",
en->flags.mispred ? " MISPRED" : "", en->flags.in_tx ? " INTX" : "",
en->flags.in_tx ? " INTX" : "", en->flags.abort ? " ABORT" : "");
en->flags.abort ? " ABORT" : "");
if (en->flags.cycles) { if (en->flags.cycles) {
printf(" %d cycles", en->flags.cycles); printed += fprintf(fp, " %d cycles", en->flags.cycles);
if (insn) if (insn)
printf(" %.2f IPC", (float)insn / en->flags.cycles); printed += fprintf(fp, " %.2f IPC", (float)insn / en->flags.cycles);
} }
putchar('\n'); return printed + fprintf(fp, "\n");
} }
static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu, static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
uint64_t addr, struct symbol **lastsym, u8 cpumode, int cpu, struct symbol **lastsym,
struct perf_event_attr *attr) struct perf_event_attr *attr, FILE *fp)
{ {
struct addr_location al; struct addr_location al;
int off; int off, printed = 0;
memset(&al, 0, sizeof(al)); memset(&al, 0, sizeof(al));
...@@ -822,7 +839,7 @@ static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu, ...@@ -822,7 +839,7 @@ static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu,
thread__find_addr_map(thread, cpumode, MAP__VARIABLE, thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
addr, &al); addr, &al);
if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end) if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end)
return; return 0;
al.cpu = cpu; al.cpu = cpu;
al.sym = NULL; al.sym = NULL;
...@@ -830,37 +847,39 @@ static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu, ...@@ -830,37 +847,39 @@ static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu,
al.sym = map__find_symbol(al.map, al.addr); al.sym = map__find_symbol(al.map, al.addr);
if (!al.sym) if (!al.sym)
return; return 0;
if (al.addr < al.sym->end) if (al.addr < al.sym->end)
off = al.addr - al.sym->start; off = al.addr - al.sym->start;
else else
off = al.addr - al.map->start - al.sym->start; off = al.addr - al.map->start - al.sym->start;
printf("\t%s", al.sym->name); printed += fprintf(fp, "\t%s", al.sym->name);
if (off) if (off)
printf("%+d", off); printed += fprintf(fp, "%+d", off);
putchar(':'); printed += fprintf(fp, ":");
if (PRINT_FIELD(SRCLINE)) if (PRINT_FIELD(SRCLINE))
map__fprintf_srcline(al.map, al.addr, "\t", stdout); printed += map__fprintf_srcline(al.map, al.addr, "\t", fp);
putchar('\n'); printed += fprintf(fp, "\n");
*lastsym = al.sym; *lastsym = al.sym;
return printed;
} }
static void print_sample_brstackinsn(struct perf_sample *sample, static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
struct thread *thread, struct thread *thread,
struct perf_event_attr *attr, struct perf_event_attr *attr,
struct machine *machine) struct machine *machine, FILE *fp)
{ {
struct branch_stack *br = sample->branch_stack; struct branch_stack *br = sample->branch_stack;
u64 start, end; u64 start, end;
int i, insn, len, nr, ilen; int i, insn, len, nr, ilen, printed = 0;
struct perf_insn x; struct perf_insn x;
u8 buffer[MAXBB]; u8 buffer[MAXBB];
unsigned off; unsigned off;
struct symbol *lastsym = NULL; struct symbol *lastsym = NULL;
if (!(br && br->nr)) if (!(br && br->nr))
return; return 0;
nr = br->nr; nr = br->nr;
if (max_blocks && nr > max_blocks + 1) if (max_blocks && nr > max_blocks + 1)
nr = max_blocks + 1; nr = max_blocks + 1;
...@@ -868,17 +887,17 @@ static void print_sample_brstackinsn(struct perf_sample *sample, ...@@ -868,17 +887,17 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
x.thread = thread; x.thread = thread;
x.cpu = sample->cpu; x.cpu = sample->cpu;
putchar('\n'); printed += fprintf(fp, "%c", '\n');
/* Handle first from jump, of which we don't know the entry. */ /* Handle first from jump, of which we don't know the entry. */
len = grab_bb(buffer, br->entries[nr-1].from, len = grab_bb(buffer, br->entries[nr-1].from,
br->entries[nr-1].from, br->entries[nr-1].from,
machine, thread, &x.is64bit, &x.cpumode, false); machine, thread, &x.is64bit, &x.cpumode, false);
if (len > 0) { if (len > 0) {
print_ip_sym(thread, x.cpumode, x.cpu, printed += ip__fprintf_sym(br->entries[nr - 1].from, thread,
br->entries[nr - 1].from, &lastsym, attr); x.cpumode, x.cpu, &lastsym, attr, fp);
print_jump(br->entries[nr - 1].from, &br->entries[nr - 1], printed += ip__fprintf_jump(br->entries[nr - 1].from, &br->entries[nr - 1],
&x, buffer, len, 0); &x, buffer, len, 0, fp);
} }
/* Print all blocks */ /* Print all blocks */
...@@ -904,13 +923,13 @@ static void print_sample_brstackinsn(struct perf_sample *sample, ...@@ -904,13 +923,13 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
for (off = 0;; off += ilen) { for (off = 0;; off += ilen) {
uint64_t ip = start + off; uint64_t ip = start + off;
print_ip_sym(thread, x.cpumode, x.cpu, ip, &lastsym, attr); printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
if (ip == end) { if (ip == end) {
print_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn); printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp);
break; break;
} else { } else {
printf("\t%016" PRIx64 "\t%s\n", ip, printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
dump_insn(&x, ip, buffer + off, len - off, &ilen)); dump_insn(&x, ip, buffer + off, len - off, &ilen));
if (ilen == 0) if (ilen == 0)
break; break;
insn++; insn++;
...@@ -923,9 +942,9 @@ static void print_sample_brstackinsn(struct perf_sample *sample, ...@@ -923,9 +942,9 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
* has not been executed yet. * has not been executed yet.
*/ */
if (br->entries[0].from == sample->ip) if (br->entries[0].from == sample->ip)
return; goto out;
if (br->entries[0].flags.abort) if (br->entries[0].flags.abort)
return; goto out;
/* /*
* Print final block upto sample * Print final block upto sample
...@@ -933,58 +952,61 @@ static void print_sample_brstackinsn(struct perf_sample *sample, ...@@ -933,58 +952,61 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
start = br->entries[0].to; start = br->entries[0].to;
end = sample->ip; end = sample->ip;
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true); len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true);
print_ip_sym(thread, x.cpumode, x.cpu, start, &lastsym, attr); printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
if (len <= 0) { if (len <= 0) {
/* Print at least last IP if basic block did not work */ /* Print at least last IP if basic block did not work */
len = grab_bb(buffer, sample->ip, sample->ip, len = grab_bb(buffer, sample->ip, sample->ip,
machine, thread, &x.is64bit, &x.cpumode, false); machine, thread, &x.is64bit, &x.cpumode, false);
if (len <= 0) if (len <= 0)
return; goto out;
printf("\t%016" PRIx64 "\t%s\n", sample->ip, printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", sample->ip,
dump_insn(&x, sample->ip, buffer, len, NULL)); dump_insn(&x, sample->ip, buffer, len, NULL));
return; goto out;
} }
for (off = 0; off <= end - start; off += ilen) { for (off = 0; off <= end - start; off += ilen) {
printf("\t%016" PRIx64 "\t%s\n", start + off, printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off,
dump_insn(&x, start + off, buffer + off, len - off, &ilen)); dump_insn(&x, start + off, buffer + off, len - off, &ilen));
if (ilen == 0) if (ilen == 0)
break; break;
} }
out:
return printed;
} }
static void print_sample_addr(struct perf_sample *sample, static int perf_sample__fprintf_addr(struct perf_sample *sample,
struct thread *thread, struct thread *thread,
struct perf_event_attr *attr) struct perf_event_attr *attr, FILE *fp)
{ {
struct addr_location al; struct addr_location al;
int printed = fprintf(fp, "%16" PRIx64, sample->addr);
printf("%16" PRIx64, sample->addr);
if (!sample_addr_correlates_sym(attr)) if (!sample_addr_correlates_sym(attr))
return; goto out;
thread__resolve(thread, &al, sample); thread__resolve(thread, &al, sample);
if (PRINT_FIELD(SYM)) { if (PRINT_FIELD(SYM)) {
printf(" "); printed += fprintf(fp, " ");
if (PRINT_FIELD(SYMOFFSET)) if (PRINT_FIELD(SYMOFFSET))
symbol__fprintf_symname_offs(al.sym, &al, stdout); printed += symbol__fprintf_symname_offs(al.sym, &al, fp);
else else
symbol__fprintf_symname(al.sym, stdout); printed += symbol__fprintf_symname(al.sym, fp);
} }
if (PRINT_FIELD(DSO)) { if (PRINT_FIELD(DSO)) {
printf(" ("); printed += fprintf(fp, " (");
map__fprintf_dsoname(al.map, stdout); printed += map__fprintf_dsoname(al.map, fp);
printf(")"); printed += fprintf(fp, ")");
} }
out:
return printed;
} }
static void print_sample_callindent(struct perf_sample *sample, static int perf_sample__fprintf_callindent(struct perf_sample *sample,
struct perf_evsel *evsel, struct perf_evsel *evsel,
struct thread *thread, struct thread *thread,
struct addr_location *al) struct addr_location *al, FILE *fp)
{ {
struct perf_event_attr *attr = &evsel->attr; struct perf_event_attr *attr = &evsel->attr;
size_t depth = thread_stack__depth(thread); size_t depth = thread_stack__depth(thread);
...@@ -1019,12 +1041,12 @@ static void print_sample_callindent(struct perf_sample *sample, ...@@ -1019,12 +1041,12 @@ static void print_sample_callindent(struct perf_sample *sample,
} }
if (name) if (name)
len = printf("%*s%s", (int)depth * 4, "", name); len = fprintf(fp, "%*s%s", (int)depth * 4, "", name);
else if (ip) else if (ip)
len = printf("%*s%16" PRIx64, (int)depth * 4, "", ip); len = fprintf(fp, "%*s%16" PRIx64, (int)depth * 4, "", ip);
if (len < 0) if (len < 0)
return; return len;
/* /*
* Try to keep the output length from changing frequently so that the * Try to keep the output length from changing frequently so that the
...@@ -1034,39 +1056,46 @@ static void print_sample_callindent(struct perf_sample *sample, ...@@ -1034,39 +1056,46 @@ static void print_sample_callindent(struct perf_sample *sample,
spacing = round_up(len + 4, 32); spacing = round_up(len + 4, 32);
if (len < spacing) if (len < spacing)
printf("%*s", spacing - len, ""); len += fprintf(fp, "%*s", spacing - len, "");
return len;
} }
static void print_insn(struct perf_sample *sample, static int perf_sample__fprintf_insn(struct perf_sample *sample,
struct perf_event_attr *attr, struct perf_event_attr *attr,
struct thread *thread, struct thread *thread,
struct machine *machine) struct machine *machine, FILE *fp)
{ {
int printed = 0;
if (PRINT_FIELD(INSNLEN)) if (PRINT_FIELD(INSNLEN))
printf(" ilen: %d", sample->insn_len); printed += fprintf(fp, " ilen: %d", sample->insn_len);
if (PRINT_FIELD(INSN)) { if (PRINT_FIELD(INSN)) {
int i; int i;
printf(" insn:"); printed += fprintf(fp, " insn:");
for (i = 0; i < sample->insn_len; i++) for (i = 0; i < sample->insn_len; i++)
printf(" %02x", (unsigned char)sample->insn[i]); printed += fprintf(fp, " %02x", (unsigned char)sample->insn[i]);
} }
if (PRINT_FIELD(BRSTACKINSN)) if (PRINT_FIELD(BRSTACKINSN))
print_sample_brstackinsn(sample, thread, attr, machine); printed += perf_sample__fprintf_brstackinsn(sample, thread, attr, machine, fp);
return printed;
} }
static void print_sample_bts(struct perf_sample *sample, static int perf_sample__fprintf_bts(struct perf_sample *sample,
struct perf_evsel *evsel, struct perf_evsel *evsel,
struct thread *thread, struct thread *thread,
struct addr_location *al, struct addr_location *al,
struct machine *machine) struct machine *machine, FILE *fp)
{ {
struct perf_event_attr *attr = &evsel->attr; struct perf_event_attr *attr = &evsel->attr;
unsigned int type = output_type(attr->type); unsigned int type = output_type(attr->type);
bool print_srcline_last = false; bool print_srcline_last = false;
int printed = 0;
if (PRINT_FIELD(CALLINDENT)) if (PRINT_FIELD(CALLINDENT))
print_sample_callindent(sample, evsel, thread, al); printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, fp);
/* print branch_from information */ /* print branch_from information */
if (PRINT_FIELD(IP)) { if (PRINT_FIELD(IP)) {
...@@ -1079,31 +1108,30 @@ static void print_sample_bts(struct perf_sample *sample, ...@@ -1079,31 +1108,30 @@ static void print_sample_bts(struct perf_sample *sample,
cursor = &callchain_cursor; cursor = &callchain_cursor;
if (cursor == NULL) { if (cursor == NULL) {
putchar(' '); printed += fprintf(fp, " ");
if (print_opts & EVSEL__PRINT_SRCLINE) { if (print_opts & EVSEL__PRINT_SRCLINE) {
print_srcline_last = true; print_srcline_last = true;
print_opts &= ~EVSEL__PRINT_SRCLINE; print_opts &= ~EVSEL__PRINT_SRCLINE;
} }
} else } else
putchar('\n'); printed += fprintf(fp, "\n");
sample__fprintf_sym(sample, al, 0, print_opts, cursor, stdout); printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor, fp);
} }
/* print branch_to information */ /* print branch_to information */
if (PRINT_FIELD(ADDR) || if (PRINT_FIELD(ADDR) ||
((evsel->attr.sample_type & PERF_SAMPLE_ADDR) && ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
!output[type].user_set)) { !output[type].user_set)) {
printf(" => "); printed += fprintf(fp, " => ");
print_sample_addr(sample, thread, attr); printed += perf_sample__fprintf_addr(sample, thread, attr, fp);
} }
if (print_srcline_last) if (print_srcline_last)
map__fprintf_srcline(al->map, al->addr, "\n ", stdout); printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
print_insn(sample, attr, thread, machine);
printf("\n"); printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
return printed + fprintf(fp, "\n");
} }
static struct { static struct {
...@@ -1126,7 +1154,7 @@ static struct { ...@@ -1126,7 +1154,7 @@ static struct {
{0, NULL} {0, NULL}
}; };
static void print_sample_flags(u32 flags) static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
{ {
const char *chars = PERF_IP_FLAG_CHARS; const char *chars = PERF_IP_FLAG_CHARS;
const int n = strlen(PERF_IP_FLAG_CHARS); const int n = strlen(PERF_IP_FLAG_CHARS);
...@@ -1153,9 +1181,9 @@ static void print_sample_flags(u32 flags) ...@@ -1153,9 +1181,9 @@ static void print_sample_flags(u32 flags)
str[pos] = 0; str[pos] = 0;
if (name) if (name)
printf(" %-7s%4s ", name, in_tx ? "(x)" : ""); return fprintf(fp, " %-7s%4s ", name, in_tx ? "(x)" : "");
else
printf(" %-11s ", str); return fprintf(fp, " %-11s ", str);
} }
struct printer_data { struct printer_data {
...@@ -1164,40 +1192,40 @@ struct printer_data { ...@@ -1164,40 +1192,40 @@ struct printer_data {
bool is_printable; bool is_printable;
}; };
static void static int sample__fprintf_bpf_output(enum binary_printer_ops op,
print_sample_bpf_output_printer(enum binary_printer_ops op, unsigned int val,
unsigned int val, void *extra, FILE *fp)
void *extra)
{ {
unsigned char ch = (unsigned char)val; unsigned char ch = (unsigned char)val;
struct printer_data *printer_data = extra; struct printer_data *printer_data = extra;
int printed = 0;
switch (op) { switch (op) {
case BINARY_PRINT_DATA_BEGIN: case BINARY_PRINT_DATA_BEGIN:
printf("\n"); printed += fprintf(fp, "\n");
break; break;
case BINARY_PRINT_LINE_BEGIN: case BINARY_PRINT_LINE_BEGIN:
printf("%17s", !printer_data->line_no ? "BPF output:" : printed += fprintf(fp, "%17s", !printer_data->line_no ? "BPF output:" :
" "); " ");
break; break;
case BINARY_PRINT_ADDR: case BINARY_PRINT_ADDR:
printf(" %04x:", val); printed += fprintf(fp, " %04x:", val);
break; break;
case BINARY_PRINT_NUM_DATA: case BINARY_PRINT_NUM_DATA:
printf(" %02x", val); printed += fprintf(fp, " %02x", val);
break; break;
case BINARY_PRINT_NUM_PAD: case BINARY_PRINT_NUM_PAD:
printf(" "); printed += fprintf(fp, " ");
break; break;
case BINARY_PRINT_SEP: case BINARY_PRINT_SEP:
printf(" "); printed += fprintf(fp, " ");
break; break;
case BINARY_PRINT_CHAR_DATA: case BINARY_PRINT_CHAR_DATA:
if (printer_data->hit_nul && ch) if (printer_data->hit_nul && ch)
printer_data->is_printable = false; printer_data->is_printable = false;
if (!isprint(ch)) { if (!isprint(ch)) {
printf("%c", '.'); printed += fprintf(fp, "%c", '.');
if (!printer_data->is_printable) if (!printer_data->is_printable)
break; break;
...@@ -1207,154 +1235,154 @@ print_sample_bpf_output_printer(enum binary_printer_ops op, ...@@ -1207,154 +1235,154 @@ print_sample_bpf_output_printer(enum binary_printer_ops op,
else else
printer_data->is_printable = false; printer_data->is_printable = false;
} else { } else {
printf("%c", ch); printed += fprintf(fp, "%c", ch);
} }
break; break;
case BINARY_PRINT_CHAR_PAD: case BINARY_PRINT_CHAR_PAD:
printf(" "); printed += fprintf(fp, " ");
break; break;
case BINARY_PRINT_LINE_END: case BINARY_PRINT_LINE_END:
printf("\n"); printed += fprintf(fp, "\n");
printer_data->line_no++; printer_data->line_no++;
break; break;
case BINARY_PRINT_DATA_END: case BINARY_PRINT_DATA_END:
default: default:
break; break;
} }
return printed;
} }
static void print_sample_bpf_output(struct perf_sample *sample) static int perf_sample__fprintf_bpf_output(struct perf_sample *sample, FILE *fp)
{ {
unsigned int nr_bytes = sample->raw_size; unsigned int nr_bytes = sample->raw_size;
struct printer_data printer_data = {0, false, true}; struct printer_data printer_data = {0, false, true};
int printed = binary__fprintf(sample->raw_data, nr_bytes, 8,
print_binary(sample->raw_data, nr_bytes, 8, sample__fprintf_bpf_output, &printer_data, fp);
print_sample_bpf_output_printer, &printer_data);
if (printer_data.is_printable && printer_data.hit_nul) if (printer_data.is_printable && printer_data.hit_nul)
printf("%17s \"%s\"\n", "BPF string:", printed += fprintf(fp, "%17s \"%s\"\n", "BPF string:", (char *)(sample->raw_data));
(char *)(sample->raw_data));
return printed;
} }
static void print_sample_spacing(int len, int spacing) static int perf_sample__fprintf_spacing(int len, int spacing, FILE *fp)
{ {
if (len > 0 && len < spacing) if (len > 0 && len < spacing)
printf("%*s", spacing - len, ""); return fprintf(fp, "%*s", spacing - len, "");
return 0;
} }
static void print_sample_pt_spacing(int len) static int perf_sample__fprintf_pt_spacing(int len, FILE *fp)
{ {
print_sample_spacing(len, 34); return perf_sample__fprintf_spacing(len, 34, fp);
} }
static void print_sample_synth_ptwrite(struct perf_sample *sample) static int perf_sample__fprintf_synth_ptwrite(struct perf_sample *sample, FILE *fp)
{ {
struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample); struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
int len; int len;
if (perf_sample__bad_synth_size(sample, *data)) if (perf_sample__bad_synth_size(sample, *data))
return; return 0;
len = printf(" IP: %u payload: %#" PRIx64 " ", len = fprintf(fp, " IP: %u payload: %#" PRIx64 " ",
data->ip, le64_to_cpu(data->payload)); data->ip, le64_to_cpu(data->payload));
print_sample_pt_spacing(len); return len + perf_sample__fprintf_pt_spacing(len, fp);
} }
static void print_sample_synth_mwait(struct perf_sample *sample) static int perf_sample__fprintf_synth_mwait(struct perf_sample *sample, FILE *fp)
{ {
struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample); struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
int len; int len;
if (perf_sample__bad_synth_size(sample, *data)) if (perf_sample__bad_synth_size(sample, *data))
return; return 0;
len = printf(" hints: %#x extensions: %#x ", len = fprintf(fp, " hints: %#x extensions: %#x ",
data->hints, data->extensions); data->hints, data->extensions);
print_sample_pt_spacing(len); return len + perf_sample__fprintf_pt_spacing(len, fp);
} }
static void print_sample_synth_pwre(struct perf_sample *sample) static int perf_sample__fprintf_synth_pwre(struct perf_sample *sample, FILE *fp)
{ {
struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample); struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
int len; int len;
if (perf_sample__bad_synth_size(sample, *data)) if (perf_sample__bad_synth_size(sample, *data))
return; return 0;
len = printf(" hw: %u cstate: %u sub-cstate: %u ", len = fprintf(fp, " hw: %u cstate: %u sub-cstate: %u ",
data->hw, data->cstate, data->subcstate); data->hw, data->cstate, data->subcstate);
print_sample_pt_spacing(len); return len + perf_sample__fprintf_pt_spacing(len, fp);
} }
static void print_sample_synth_exstop(struct perf_sample *sample) static int perf_sample__fprintf_synth_exstop(struct perf_sample *sample, FILE *fp)
{ {
struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample); struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
int len; int len;
if (perf_sample__bad_synth_size(sample, *data)) if (perf_sample__bad_synth_size(sample, *data))
return; return 0;
len = printf(" IP: %u ", data->ip); len = fprintf(fp, " IP: %u ", data->ip);
print_sample_pt_spacing(len); return len + perf_sample__fprintf_pt_spacing(len, fp);
} }
static void print_sample_synth_pwrx(struct perf_sample *sample) static int perf_sample__fprintf_synth_pwrx(struct perf_sample *sample, FILE *fp)
{ {
struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample); struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
int len; int len;
if (perf_sample__bad_synth_size(sample, *data)) if (perf_sample__bad_synth_size(sample, *data))
return; return 0;
len = printf(" deepest cstate: %u last cstate: %u wake reason: %#x ", len = fprintf(fp, " deepest cstate: %u last cstate: %u wake reason: %#x ",
data->deepest_cstate, data->last_cstate, data->deepest_cstate, data->last_cstate,
data->wake_reason); data->wake_reason);
print_sample_pt_spacing(len); return len + perf_sample__fprintf_pt_spacing(len, fp);
} }
static void print_sample_synth_cbr(struct perf_sample *sample) static int perf_sample__fprintf_synth_cbr(struct perf_sample *sample, FILE *fp)
{ {
struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample); struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
unsigned int percent, freq; unsigned int percent, freq;
int len; int len;
if (perf_sample__bad_synth_size(sample, *data)) if (perf_sample__bad_synth_size(sample, *data))
return; return 0;
freq = (le32_to_cpu(data->freq) + 500) / 1000; freq = (le32_to_cpu(data->freq) + 500) / 1000;
len = printf(" cbr: %2u freq: %4u MHz ", data->cbr, freq); len = fprintf(fp, " cbr: %2u freq: %4u MHz ", data->cbr, freq);
if (data->max_nonturbo) { if (data->max_nonturbo) {
percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10; percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10;
len += printf("(%3u%%) ", percent); len += fprintf(fp, "(%3u%%) ", percent);
} }
print_sample_pt_spacing(len); return len + perf_sample__fprintf_pt_spacing(len, fp);
} }
static void print_sample_synth(struct perf_sample *sample, static int perf_sample__fprintf_synth(struct perf_sample *sample,
struct perf_evsel *evsel) struct perf_evsel *evsel, FILE *fp)
{ {
switch (evsel->attr.config) { switch (evsel->attr.config) {
case PERF_SYNTH_INTEL_PTWRITE: case PERF_SYNTH_INTEL_PTWRITE:
print_sample_synth_ptwrite(sample); return perf_sample__fprintf_synth_ptwrite(sample, fp);
break;
case PERF_SYNTH_INTEL_MWAIT: case PERF_SYNTH_INTEL_MWAIT:
print_sample_synth_mwait(sample); return perf_sample__fprintf_synth_mwait(sample, fp);
break;
case PERF_SYNTH_INTEL_PWRE: case PERF_SYNTH_INTEL_PWRE:
print_sample_synth_pwre(sample); return perf_sample__fprintf_synth_pwre(sample, fp);
break;
case PERF_SYNTH_INTEL_EXSTOP: case PERF_SYNTH_INTEL_EXSTOP:
print_sample_synth_exstop(sample); return perf_sample__fprintf_synth_exstop(sample, fp);
break;
case PERF_SYNTH_INTEL_PWRX: case PERF_SYNTH_INTEL_PWRX:
print_sample_synth_pwrx(sample); return perf_sample__fprintf_synth_pwrx(sample, fp);
break;
case PERF_SYNTH_INTEL_CBR: case PERF_SYNTH_INTEL_CBR:
print_sample_synth_cbr(sample); return perf_sample__fprintf_synth_cbr(sample, fp);
break;
default: default:
break; break;
} }
return 0;
} }
struct perf_script { struct perf_script {
...@@ -1386,7 +1414,7 @@ static int perf_evlist__max_name_len(struct perf_evlist *evlist) ...@@ -1386,7 +1414,7 @@ static int perf_evlist__max_name_len(struct perf_evlist *evlist)
return max; return max;
} }
static size_t data_src__printf(u64 data_src) static int data_src__fprintf(u64 data_src, FILE *fp)
{ {
struct mem_info mi = { .data_src.val = data_src }; struct mem_info mi = { .data_src.val = data_src };
char decode[100]; char decode[100];
...@@ -1400,7 +1428,7 @@ static size_t data_src__printf(u64 data_src) ...@@ -1400,7 +1428,7 @@ static size_t data_src__printf(u64 data_src)
if (maxlen < len) if (maxlen < len)
maxlen = len; maxlen = len;
return printf("%-*s", maxlen, out); return fprintf(fp, "%-*s", maxlen, out);
} }
static void process_event(struct perf_script *script, static void process_event(struct perf_script *script,
...@@ -1411,11 +1439,12 @@ static void process_event(struct perf_script *script, ...@@ -1411,11 +1439,12 @@ static void process_event(struct perf_script *script,
struct thread *thread = al->thread; struct thread *thread = al->thread;
struct perf_event_attr *attr = &evsel->attr; struct perf_event_attr *attr = &evsel->attr;
unsigned int type = output_type(attr->type); unsigned int type = output_type(attr->type);
FILE *fp = stdout;
if (output[type].fields == 0) if (output[type].fields == 0)
return; return;
print_sample_start(sample, thread, evsel); perf_sample__fprintf_start(sample, thread, evsel, fp);
if (PRINT_FIELD(PERIOD)) if (PRINT_FIELD(PERIOD))
printf("%10" PRIu64 " ", sample->period); printf("%10" PRIu64 " ", sample->period);
...@@ -1431,10 +1460,10 @@ static void process_event(struct perf_script *script, ...@@ -1431,10 +1460,10 @@ static void process_event(struct perf_script *script,
} }
if (print_flags) if (print_flags)
print_sample_flags(sample->flags); perf_sample__fprintf_flags(sample->flags, fp);
if (is_bts_event(attr)) { if (is_bts_event(attr)) {
print_sample_bts(sample, evsel, thread, al, machine); perf_sample__fprintf_bts(sample, evsel, thread, al, machine, fp);
return; return;
} }
...@@ -1443,16 +1472,16 @@ static void process_event(struct perf_script *script, ...@@ -1443,16 +1472,16 @@ static void process_event(struct perf_script *script,
sample->raw_data, sample->raw_size); sample->raw_data, sample->raw_size);
if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH)) if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
print_sample_synth(sample, evsel); perf_sample__fprintf_synth(sample, evsel, fp);
if (PRINT_FIELD(ADDR)) if (PRINT_FIELD(ADDR))
print_sample_addr(sample, thread, attr); perf_sample__fprintf_addr(sample, thread, attr, fp);
if (PRINT_FIELD(DATA_SRC)) if (PRINT_FIELD(DATA_SRC))
data_src__printf(sample->data_src); data_src__fprintf(sample->data_src, fp);
if (PRINT_FIELD(WEIGHT)) if (PRINT_FIELD(WEIGHT))
printf("%16" PRIu64, sample->weight); fprintf(fp, "%16" PRIu64, sample->weight);
if (PRINT_FIELD(IP)) { if (PRINT_FIELD(IP)) {
struct callchain_cursor *cursor = NULL; struct callchain_cursor *cursor = NULL;
...@@ -1462,26 +1491,26 @@ static void process_event(struct perf_script *script, ...@@ -1462,26 +1491,26 @@ static void process_event(struct perf_script *script,
sample, NULL, NULL, scripting_max_stack) == 0) sample, NULL, NULL, scripting_max_stack) == 0)
cursor = &callchain_cursor; cursor = &callchain_cursor;
putchar(cursor ? '\n' : ' '); fputc(cursor ? '\n' : ' ', fp);
sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, stdout); sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, fp);
} }
if (PRINT_FIELD(IREGS)) if (PRINT_FIELD(IREGS))
print_sample_iregs(sample, attr); perf_sample__fprintf_iregs(sample, attr, fp);
if (PRINT_FIELD(UREGS)) if (PRINT_FIELD(UREGS))
print_sample_uregs(sample, attr); perf_sample__fprintf_uregs(sample, attr, fp);
if (PRINT_FIELD(BRSTACK)) if (PRINT_FIELD(BRSTACK))
print_sample_brstack(sample, thread, attr); perf_sample__fprintf_brstack(sample, thread, attr, fp);
else if (PRINT_FIELD(BRSTACKSYM)) else if (PRINT_FIELD(BRSTACKSYM))
print_sample_brstacksym(sample, thread, attr); perf_sample__fprintf_brstacksym(sample, thread, attr, fp);
else if (PRINT_FIELD(BRSTACKOFF)) else if (PRINT_FIELD(BRSTACKOFF))
print_sample_brstackoff(sample, thread, attr); perf_sample__fprintf_brstackoff(sample, thread, attr, fp);
if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT)) if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
print_sample_bpf_output(sample); perf_sample__fprintf_bpf_output(sample, fp);
print_insn(sample, attr, thread, machine); perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
if (PRINT_FIELD(PHYS_ADDR)) if (PRINT_FIELD(PHYS_ADDR))
printf("%16" PRIx64, sample->phys_addr); printf("%16" PRIx64, sample->phys_addr);
...@@ -1659,7 +1688,7 @@ static int process_comm_event(struct perf_tool *tool, ...@@ -1659,7 +1688,7 @@ static int process_comm_event(struct perf_tool *tool,
sample->tid = event->comm.tid; sample->tid = event->comm.tid;
sample->pid = event->comm.pid; sample->pid = event->comm.pid;
} }
print_sample_start(sample, thread, evsel); perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout); perf_event__fprintf(event, stdout);
ret = 0; ret = 0;
out: out:
...@@ -1694,7 +1723,7 @@ static int process_namespaces_event(struct perf_tool *tool, ...@@ -1694,7 +1723,7 @@ static int process_namespaces_event(struct perf_tool *tool,
sample->tid = event->namespaces.tid; sample->tid = event->namespaces.tid;
sample->pid = event->namespaces.pid; sample->pid = event->namespaces.pid;
} }
print_sample_start(sample, thread, evsel); perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout); perf_event__fprintf(event, stdout);
ret = 0; ret = 0;
out: out:
...@@ -1727,7 +1756,7 @@ static int process_fork_event(struct perf_tool *tool, ...@@ -1727,7 +1756,7 @@ static int process_fork_event(struct perf_tool *tool,
sample->tid = event->fork.tid; sample->tid = event->fork.tid;
sample->pid = event->fork.pid; sample->pid = event->fork.pid;
} }
print_sample_start(sample, thread, evsel); perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout); perf_event__fprintf(event, stdout);
thread__put(thread); thread__put(thread);
...@@ -1756,7 +1785,7 @@ static int process_exit_event(struct perf_tool *tool, ...@@ -1756,7 +1785,7 @@ static int process_exit_event(struct perf_tool *tool,
sample->tid = event->fork.tid; sample->tid = event->fork.tid;
sample->pid = event->fork.pid; sample->pid = event->fork.pid;
} }
print_sample_start(sample, thread, evsel); perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout); perf_event__fprintf(event, stdout);
if (perf_event__process_exit(tool, event, sample, machine) < 0) if (perf_event__process_exit(tool, event, sample, machine) < 0)
...@@ -1791,7 +1820,7 @@ static int process_mmap_event(struct perf_tool *tool, ...@@ -1791,7 +1820,7 @@ static int process_mmap_event(struct perf_tool *tool,
sample->tid = event->mmap.tid; sample->tid = event->mmap.tid;
sample->pid = event->mmap.pid; sample->pid = event->mmap.pid;
} }
print_sample_start(sample, thread, evsel); perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout); perf_event__fprintf(event, stdout);
thread__put(thread); thread__put(thread);
return 0; return 0;
...@@ -1822,7 +1851,7 @@ static int process_mmap2_event(struct perf_tool *tool, ...@@ -1822,7 +1851,7 @@ static int process_mmap2_event(struct perf_tool *tool,
sample->tid = event->mmap2.tid; sample->tid = event->mmap2.tid;
sample->pid = event->mmap2.pid; sample->pid = event->mmap2.pid;
} }
print_sample_start(sample, thread, evsel); perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout); perf_event__fprintf(event, stdout);
thread__put(thread); thread__put(thread);
return 0; return 0;
...@@ -1848,7 +1877,7 @@ static int process_switch_event(struct perf_tool *tool, ...@@ -1848,7 +1877,7 @@ static int process_switch_event(struct perf_tool *tool,
return -1; return -1;
} }
print_sample_start(sample, thread, evsel); perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout); perf_event__fprintf(event, stdout);
thread__put(thread); thread__put(thread);
return 0; return 0;
...@@ -3045,7 +3074,8 @@ int cmd_script(int argc, const char **argv) ...@@ -3045,7 +3074,8 @@ int cmd_script(int argc, const char **argv)
machine__resolve_kernel_addr, machine__resolve_kernel_addr,
&session->machines.host) < 0) { &session->machines.host) < 0) {
pr_err("%s: failed to set libtraceevent function resolver\n", __func__); pr_err("%s: failed to set libtraceevent function resolver\n", __func__);
return -1; err = -1;
goto out_delete;
} }
if (generate_script_lang) { if (generate_script_lang) {
...@@ -3105,7 +3135,8 @@ int cmd_script(int argc, const char **argv) ...@@ -3105,7 +3135,8 @@ int cmd_script(int argc, const char **argv)
/* needs to be parsed after looking up reference time */ /* needs to be parsed after looking up reference time */
if (perf_time__parse_str(&script.ptime, script.time_str) != 0) { if (perf_time__parse_str(&script.ptime, script.time_str) != 0) {
pr_err("Invalid time string\n"); pr_err("Invalid time string\n");
return -EINVAL; err = -EINVAL;
goto out_delete;
} }
err = __cmd_script(&script); err = __cmd_script(&script);
......
...@@ -1828,16 +1828,14 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs ...@@ -1828,16 +1828,14 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs
goto out_put; goto out_put;
} }
static void bpf_output__printer(enum binary_printer_ops op, static int bpf_output__printer(enum binary_printer_ops op,
unsigned int val, void *extra) unsigned int val, void *extra __maybe_unused, FILE *fp)
{ {
FILE *output = extra;
unsigned char ch = (unsigned char)val; unsigned char ch = (unsigned char)val;
switch (op) { switch (op) {
case BINARY_PRINT_CHAR_DATA: case BINARY_PRINT_CHAR_DATA:
fprintf(output, "%c", isprint(ch) ? ch : '.'); return fprintf(fp, "%c", isprint(ch) ? ch : '.');
break;
case BINARY_PRINT_DATA_BEGIN: case BINARY_PRINT_DATA_BEGIN:
case BINARY_PRINT_LINE_BEGIN: case BINARY_PRINT_LINE_BEGIN:
case BINARY_PRINT_ADDR: case BINARY_PRINT_ADDR:
...@@ -1850,13 +1848,15 @@ static void bpf_output__printer(enum binary_printer_ops op, ...@@ -1850,13 +1848,15 @@ static void bpf_output__printer(enum binary_printer_ops op,
default: default:
break; break;
} }
return 0;
} }
static void bpf_output__fprintf(struct trace *trace, static void bpf_output__fprintf(struct trace *trace,
struct perf_sample *sample) struct perf_sample *sample)
{ {
print_binary(sample->raw_data, sample->raw_size, 8, binary__fprintf(sample->raw_data, sample->raw_size, 8,
bpf_output__printer, trace->output); bpf_output__printer, NULL, trace->output);
} }
static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel, static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
......
...@@ -57,6 +57,11 @@ check () { ...@@ -57,6 +57,11 @@ check () {
} }
# Check if we have the kernel headers (tools/perf/../../include), else
# we're probably on a detached tarball, so no point in trying to check
# differences.
test -d ../../include || exit 0
# simple diff check # simple diff check
for i in $HEADERS; do for i in $HEADERS; do
check $i -B check $i -B
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
}, },
{ {
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )", "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "Frontend", "MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization" "MetricName": "IFetch_Line_Utilization"
}, },
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
}, },
{ {
"BriefDescription": "Cycles Per Instruction (threaded)", "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary", "MetricGroup": "Pipeline;Summary",
"MetricName": "CPI" "MetricName": "CPI"
}, },
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
}, },
{ {
"BriefDescription": "Total issue-pipeline slots", "BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1", "MetricGroup": "TopDownL1",
"MetricName": "SLOTS" "MetricName": "SLOTS"
}, },
...@@ -49,19 +49,19 @@ ...@@ -49,19 +49,19 @@
}, },
{ {
"BriefDescription": "Instructions Per Cycle (per physical core)", "BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CoreIPC" "MetricName": "CoreIPC"
}, },
{ {
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization", "MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP" "MetricName": "ILP"
}, },
{ {
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED ) ) / RS_EVENTS.EMPTY_END", "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches", "MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost" "MetricName": "BAClear_Cost"
}, },
...@@ -79,13 +79,13 @@ ...@@ -79,13 +79,13 @@
}, },
{ {
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES", "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW", "MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP" "MetricName": "MLP"
}, },
{ {
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB", "MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization" "MetricName": "Page_Walks_Utilization"
}, },
...@@ -97,7 +97,7 @@ ...@@ -97,7 +97,7 @@
}, },
{ {
"BriefDescription": "Giga Floating Point Operations Per Second", "BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 / duration_time", "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary", "MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs" "MetricName": "GFLOPs"
}, },
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
}, },
{ {
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )", "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "Frontend", "MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization" "MetricName": "IFetch_Line_Utilization"
}, },
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
}, },
{ {
"BriefDescription": "Cycles Per Instruction (threaded)", "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary", "MetricGroup": "Pipeline;Summary",
"MetricName": "CPI" "MetricName": "CPI"
}, },
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
}, },
{ {
"BriefDescription": "Total issue-pipeline slots", "BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1", "MetricGroup": "TopDownL1",
"MetricName": "SLOTS" "MetricName": "SLOTS"
}, },
...@@ -49,19 +49,19 @@ ...@@ -49,19 +49,19 @@
}, },
{ {
"BriefDescription": "Instructions Per Cycle (per physical core)", "BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CoreIPC" "MetricName": "CoreIPC"
}, },
{ {
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization", "MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP" "MetricName": "ILP"
}, },
{ {
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED ) ) / RS_EVENTS.EMPTY_END", "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches", "MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost" "MetricName": "BAClear_Cost"
}, },
...@@ -79,13 +79,13 @@ ...@@ -79,13 +79,13 @@
}, },
{ {
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES", "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW", "MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP" "MetricName": "MLP"
}, },
{ {
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED) ) / (2*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED) ) / (2*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles))",
"MetricGroup": "TLB", "MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization" "MetricName": "Page_Walks_Utilization"
}, },
...@@ -97,7 +97,7 @@ ...@@ -97,7 +97,7 @@
}, },
{ {
"BriefDescription": "Giga Floating Point Operations Per Second", "BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 / duration_time", "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary", "MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs" "MetricName": "GFLOPs"
}, },
......
[
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
"EventCode": "0x2E",
"Counter": "0,1,2,3",
"UMask": "0x41",
"PEBScounters": "0,1,2,3",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "L2 cache request misses"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
"EventCode": "0x2E",
"Counter": "0,1,2,3",
"UMask": "0x4f",
"PEBScounters": "0,1,2,3",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "L2 cache requests"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
"EventCode": "0x30",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "L2_REJECT_XQ.ALL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Requests rejected by the XQ"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
"EventCode": "0x31",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "CORE_REJECT_L2Q.ALL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Requests rejected by the L2Q"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "DL1.REPLACEMENT",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "L1 Cache evictions for dirty data"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
"EventCode": "0x86",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
},
{
"CollectPEBSRecord": "1",
"EventCode": "0xB7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE",
"PDIR_COUNTER": "na",
"SampleAfterValue": "100007",
"BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x21",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "200003",
"BriefDescription": "Locked load uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "200003",
"BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "200003",
"BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x43",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
"SampleAfterValue": "200003",
"BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts the number of load uops retired.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x81",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "200003",
"BriefDescription": "Load uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts the number of store uops retired.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x82",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "200003",
"BriefDescription": "Store uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x83",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL",
"SampleAfterValue": "200003",
"BriefDescription": "Memory uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts load uops retired that hit the L1 data cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts load uops retired that hit in the L2 cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts load uops retired that miss the L1 data cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "200003",
"BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts load uops retired that miss in the L2 cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "200003",
"BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts load uops retired where the cache line containing the data was in the modified state of another core or modules cache (HITM). More specifically, this means that when the load address was checked by other caching agents (typically another processor) in the system, one of those caching agents indicated that they had a dirty copy of the data. Loads that obtain a HITM response incur greater latency than most is typical for a load. In addition, since HITM indicates that some other processor had this data in its cache, it implies that the data was shared between processors, or potentially was a lock or semaphore value. This event is useful for locating sharing, false sharing, and contended locks.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x20",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
"SampleAfterValue": "200003",
"BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts memory load uops retired where the data is retrieved from the WCB (or fill buffer), indicating that the load found its data while that data was in the process of being brought into the L1 cache. Typically a load will receive this indication when some other load or prefetch missed the L1 cache and was in the process of retrieving the cache line containing the data, but that process had not yet finished (and written the data back to the cache). For example, consider load X and Y, both referencing the same cache line that is not in the L1 cache. If load X misses cache first, it obtains and WCB (or fill buffer) and begins the process of requesting the data. When load Y requests the data, it will either hit the WCB, or the L1 cache, depending on exactly what time the request to Y occurs.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "Loads retired that hit WCB (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts memory load uops retired where the data is retrieved from DRAM. Event is counted at retirement, so the speculative loads are ignored. A memory load can hit (or miss) the L1 cache, hit (or miss) the L2 cache, hit DRAM, hit in the WCB or receive a HITM response.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x80",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. ",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. ",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010400",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts bus lock and split lock requests hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040400",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts bus lock and split lock requests hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000400",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000400",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000400",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000011000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000041000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000012000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000042000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000014800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000044800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200004800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000004800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000004800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000018000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000048000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts requests to the uncore subsystem hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000013010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000043010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000013091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000043091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data reads (demand & prefetch) hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200003091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000003091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000003091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x00000132b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x00000432b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x02000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
        "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x10000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x40000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"Offcore": "1"
}
]
\ No newline at end of file
[
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
"EventCode": "0x80",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "ICACHE.HIT",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
"EventCode": "0x80",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "ICACHE.MISSES",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
"EventCode": "0x80",
"Counter": "0,1,2,3",
"UMask": "0x3",
"PEBScounters": "0,1,2,3",
"EventName": "ICACHE.ACCESSES",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
"EventCode": "0xE7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "MS_DECODED.MS_ENTRY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "MS decode starts"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
"EventCode": "0xE9",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Decode restrictions due to predicting wrong instruction length"
}
]
\ No newline at end of file
[
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
"EventCode": "0x13",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
"SampleAfterValue": "200003",
"BriefDescription": "Load uops that split a page (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts when a memory store of a uop spans a page boundary (a split) is retired.",
"EventCode": "0x13",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"SampleAfterValue": "200003",
"BriefDescription": "Store uops that split a page (Precise event capable)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "Machine clears due to memory ordering issue"
}
]
\ No newline at end of file
[
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
"EventCode": "0x86",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "FETCH_STALL.ALL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles code-fetch stalled due to any reason."
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
"EventCode": "0x86",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles the code-fetch stalls and an ITLB miss is outstanding."
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Unfilled issue slots per cycle"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Unfilled issue slots per cycle to recover"
},
{
"CollectPEBSRecord": "2",
"PublicDescription": "Counts hardware interrupts received by the processor.",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "HW_INTERRUPTS.RECEIVED",
"PDIR_COUNTER": "na",
"SampleAfterValue": "203",
"BriefDescription": "Hardware interrupts received"
},
{
"CollectPEBSRecord": "2",
"PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "HW_INTERRUPTS.MASKED",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles hardware interrupts are masked"
},
{
"CollectPEBSRecord": "2",
"PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles pending interrupts are masked"
}
]
\ No newline at end of file
[
{
"PEBS": "2",
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
"EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"PEBScounters": "32",
"EventName": "INST_RETIRED.ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired (Fixed event)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
"EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"PEBScounters": "33",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when core is not halted (Fixed event)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
"EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"PEBScounters": "34",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when core is not halted (Fixed event)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"SampleAfterValue": "200003",
"BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"SampleAfterValue": "200003",
"BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "LD_BLOCKS.4K_ALIAS",
"SampleAfterValue": "200003",
"BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "LD_BLOCKS.UTLB_MISS",
"SampleAfterValue": "200003",
"BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "LD_BLOCKS.ALL_BLOCK",
"SampleAfterValue": "200003",
"BriefDescription": "Loads blocked (Precise event capable)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_ISSUED.ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Uops issued to the back end per cycle"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when core is not halted"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "CPU_CLK_UNHALTED.REF",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when core is not halted"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. 
For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_NOT_DELIVERED.ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
},
{
"PEBS": "2",
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
"EventCode": "0xC0",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "INST_RETIRED.ANY_P",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts INST_RETIRED.ANY using the Reduced Skid PEBS feature that reduces the shadow in which events aren't counted allowing for a more unbiased distribution of samples across instructions retired.",
"EventCode": "0xC0",
"Counter": "0,1,2,3",
"UMask": "0x0",
"EventName": "INST_RETIRED.PREC_DIST",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired - using Reduced Skid PEBS feature"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts uops which retired.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_RETIRED.ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_RETIRED.MS",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "MS uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of floating point divide uops retired.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_RETIRED.FPDIV",
"SampleAfterValue": "2000003",
"BriefDescription": "Floating point divide uops retired (Precise Event Capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of integer divide uops retired.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "UOPS_RETIRED.IDIV",
"SampleAfterValue": "2000003",
"BriefDescription": "Integer divide uops retired (Precise Event Capable)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts machine clears for any reason.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.ALL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "All machine clears"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.SMC",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "Self-Modifying Code detected"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "Machine clears due to FP assists"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "Machine clears due to memory disambiguation"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times that the machines clears due to a page fault. Covers both I-side and D-side(Loads/Stores) page faults. A page fault occurs when either page is not present, or an access violation",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x20",
"PEBScounters": "0,1,2,3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "Machines clear due to a page fault"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "200003",
"BriefDescription": "Retired branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x7e",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.JCC",
"SampleAfterValue": "200003",
"BriefDescription": "Retired conditional branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts the number of taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x80",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
"SampleAfterValue": "200003",
"BriefDescription": "Retired taken branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xbf",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "200003",
"BriefDescription": "Retired far branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xeb",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"SampleAfterValue": "200003",
"BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts near return branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xf7",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.RETURN",
"SampleAfterValue": "200003",
"BriefDescription": "Retired near return instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts near CALL branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xf9",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CALL",
"SampleAfterValue": "200003",
"BriefDescription": "Retired near call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts near indirect CALL branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xfb",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.IND_CALL",
"SampleAfterValue": "200003",
"BriefDescription": "Retired near indirect call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts near relative CALL branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xfd",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.REL_CALL",
"SampleAfterValue": "200003",
"BriefDescription": "Retired near relative call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0xfe",
"PEBScounters": "0,1,2,3",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"SampleAfterValue": "200003",
"BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x7e",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.JCC",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0xeb",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0xf7",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.RETURN",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0xfb",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0xfe",
"PEBScounters": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"SampleAfterValue": "200003",
"BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts core cycles if either divide unit is busy.",
"EventCode": "0xCD",
"Counter": "0,1,2,3",
"UMask": "0x0",
"PEBScounters": "0,1,2,3",
"EventName": "CYCLES_DIV_BUSY.ALL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles a divider is busy"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts core cycles the integer divide unit is busy.",
"EventCode": "0xCD",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "CYCLES_DIV_BUSY.IDIV",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles the integer divide unit is busy"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts core cycles the floating point divide unit is busy.",
"EventCode": "0xCD",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "CYCLES_DIV_BUSY.FPDIV",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Cycles the FP divide unit is busy"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
"EventCode": "0xE6",
"Counter": "0,1,2,3",
"UMask": "0x1",
"PEBScounters": "0,1,2,3",
"EventName": "BACLEARS.ALL",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "BACLEARs asserted for any branch type"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts BACLEARS on return instructions.",
"EventCode": "0xE6",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "BACLEARS.RETURN",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "BACLEARs asserted for return branch"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
"EventCode": "0xE6",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "BACLEARS.COND",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "BACLEARs asserted for conditional branch"
}
]
\ No newline at end of file
[
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walk completed due to a demand load to a 4K page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walk completed due to a demand load to a 2M or 4M page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 1GB pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1GB",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walk completed due to a demand load to a 1GB page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts once per cycle for each page walk occurring due to a load (demand data loads or SW prefetches). Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walks outstanding due to a demand load every cycle."
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to a demand data store to a 4K page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to a demand data store to a 2M or 4M page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1GB",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to a demand data store to a 1GB page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts once per cycle for each page walk occurring due to a demand data store. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walks outstanding due to a demand data store every cycle."
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts once per cycle for each page walk only while traversing the Extended Page Table (EPT), and does not count during the rest of the translation. The EPT is used for translating Guest-Physical Addresses to Physical Addresses for Virtual Machine Monitors (VMMs). Average cycles per walk can be calculated by dividing the count by number of walks.",
"EventCode": "0x4F",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "EPT.WALK_PENDING",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walks outstanding due to walking the EPT every cycle"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
"EventCode": "0x81",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "ITLB.MISS",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "ITLB misses"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x2",
"PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to an instruction fetch in a 4K page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x4",
"PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to an instruction fetch in a 2M or 4M page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x8",
"PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1GB",
"PDIR_COUNTER": "na",
"SampleAfterValue": "2000003",
"BriefDescription": "Page walk completed due to an instruction fetch in a 1GB page"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts once per cycle for each page walk occurring due to an instruction fetch. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x10",
"PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PDIR_COUNTER": "na",
"SampleAfterValue": "200003",
"BriefDescription": "Page walks outstanding due to an instruction fetch every cycle."
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts STLB flushes. The TLBs are flushed on instructions like INVLPG and MOV to CR3.",
"EventCode": "0xBD",
"Counter": "0,1,2,3",
"UMask": "0x20",
"PEBScounters": "0,1,2,3",
"EventName": "TLB_FLUSHES.STLB_ANY",
"PDIR_COUNTER": "na",
"SampleAfterValue": "20003",
"BriefDescription": "STLB flushes"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts load uops retired that caused a DTLB miss.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x11",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
"SampleAfterValue": "200003",
"BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts store uops retired that caused a DTLB miss.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x12",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
"SampleAfterValue": "200003",
"BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts uops retired that had a DTLB miss on load, store or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x13",
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"SampleAfterValue": "200003",
"BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
}
]
\ No newline at end of file
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
}, },
{ {
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )", "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "Frontend", "MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization" "MetricName": "IFetch_Line_Utilization"
}, },
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
}, },
{ {
"BriefDescription": "Cycles Per Instruction (threaded)", "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary", "MetricGroup": "Pipeline;Summary",
"MetricName": "CPI" "MetricName": "CPI"
}, },
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
}, },
{ {
"BriefDescription": "Total issue-pipeline slots", "BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1", "MetricGroup": "TopDownL1",
"MetricName": "SLOTS" "MetricName": "SLOTS"
}, },
...@@ -49,19 +49,19 @@ ...@@ -49,19 +49,19 @@
}, },
{ {
"BriefDescription": "Instructions Per Cycle (per physical core)", "BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CoreIPC" "MetricName": "CoreIPC"
}, },
{ {
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / ( cpu@uops_executed.core\\,cmask\\=1@ / 2)) if #SMT_on else (UOPS_EXECUTED.CORE / cpu@uops_executed.core\\,cmask\\=1@)", "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization", "MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP" "MetricName": "ILP"
}, },
{ {
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION ) ) / RS_EVENTS.EMPTY_END", "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches", "MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost" "MetricName": "BAClear_Cost"
}, },
...@@ -79,13 +79,13 @@ ...@@ -79,13 +79,13 @@
}, },
{ {
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES", "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW", "MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP" "MetricName": "MLP"
}, },
{ {
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB", "MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization" "MetricName": "Page_Walks_Utilization"
}, },
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
}, },
{ {
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )", "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"MetricGroup": "Frontend", "MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization" "MetricName": "IFetch_Line_Utilization"
}, },
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
}, },
{ {
"BriefDescription": "Cycles Per Instruction (threaded)", "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary", "MetricGroup": "Pipeline;Summary",
"MetricName": "CPI" "MetricName": "CPI"
}, },
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
}, },
{ {
"BriefDescription": "Total issue-pipeline slots", "BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1", "MetricGroup": "TopDownL1",
"MetricName": "SLOTS" "MetricName": "SLOTS"
}, },
...@@ -49,19 +49,19 @@ ...@@ -49,19 +49,19 @@
}, },
{ {
"BriefDescription": "Instructions Per Cycle (per physical core)", "BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CoreIPC" "MetricName": "CoreIPC"
}, },
{ {
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / ( cpu@uops_executed.core\\,cmask\\=1@ / 2)) if #SMT_on else UOPS_EXECUTED.CORE / cpu@uops_executed.core\\,cmask\\=1@", "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization", "MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP" "MetricName": "ILP"
}, },
{ {
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION ) ) / RS_EVENTS.EMPTY_END", "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches", "MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost" "MetricName": "BAClear_Cost"
}, },
...@@ -79,13 +79,13 @@ ...@@ -79,13 +79,13 @@
}, },
{ {
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES", "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW", "MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP" "MetricName": "MLP"
}, },
{ {
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB", "MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization" "MetricName": "Page_Walks_Utilization"
}, },
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
}, },
{ {
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )", "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricGroup": "Frontend", "MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization" "MetricName": "IFetch_Line_Utilization"
}, },
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
}, },
{ {
"BriefDescription": "Cycles Per Instruction (threaded)", "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary", "MetricGroup": "Pipeline;Summary",
"MetricName": "CPI" "MetricName": "CPI"
}, },
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
}, },
{ {
"BriefDescription": "Total issue-pipeline slots", "BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1", "MetricGroup": "TopDownL1",
"MetricName": "SLOTS" "MetricName": "SLOTS"
}, },
...@@ -49,19 +49,19 @@ ...@@ -49,19 +49,19 @@
}, },
{ {
"BriefDescription": "Instructions Per Cycle (per physical core)", "BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CoreIPC" "MetricName": "CoreIPC"
}, },
{ {
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization", "MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP" "MetricName": "ILP"
}, },
{ {
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END", "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches", "MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost" "MetricName": "BAClear_Cost"
}, },
...@@ -79,13 +79,13 @@ ...@@ -79,13 +79,13 @@
}, },
{ {
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES", "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW", "MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP" "MetricName": "MLP"
}, },
{ {
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB", "MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization" "MetricName": "Page_Walks_Utilization"
}, },
...@@ -97,7 +97,7 @@ ...@@ -97,7 +97,7 @@
}, },
{ {
"BriefDescription": "Giga Floating Point Operations Per Second", "BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE ) / 1000000000 / duration_time", "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary", "MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs" "MetricName": "GFLOPs"
}, },
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
}, },
{ {
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )", "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricGroup": "Frontend", "MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization" "MetricName": "IFetch_Line_Utilization"
}, },
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
}, },
{ {
"BriefDescription": "Cycles Per Instruction (threaded)", "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary", "MetricGroup": "Pipeline;Summary",
"MetricName": "CPI" "MetricName": "CPI"
}, },
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
}, },
{ {
"BriefDescription": "Total issue-pipeline slots", "BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1", "MetricGroup": "TopDownL1",
"MetricName": "SLOTS" "MetricName": "SLOTS"
}, },
...@@ -49,19 +49,19 @@ ...@@ -49,19 +49,19 @@
}, },
{ {
"BriefDescription": "Instructions Per Cycle (per physical core)", "BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CoreIPC" "MetricName": "CoreIPC"
}, },
{ {
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization", "MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP" "MetricName": "ILP"
}, },
{ {
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END", "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches", "MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost" "MetricName": "BAClear_Cost"
}, },
...@@ -79,13 +79,13 @@ ...@@ -79,13 +79,13 @@
}, },
{ {
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES", "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW", "MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP" "MetricName": "MLP"
}, },
{ {
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB", "MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization" "MetricName": "Page_Walks_Utilization"
}, },
...@@ -97,7 +97,7 @@ ...@@ -97,7 +97,7 @@
}, },
{ {
"BriefDescription": "Giga Floating Point Operations Per Second", "BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE ) / 1000000000 / duration_time", "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary", "MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs" "MetricName": "GFLOPs"
}, },
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
}, },
{ {
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )", "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricGroup": "Frontend", "MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization" "MetricName": "IFetch_Line_Utilization"
}, },
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
}, },
{ {
"BriefDescription": "Cycles Per Instruction (threaded)", "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary", "MetricGroup": "Pipeline;Summary",
"MetricName": "CPI" "MetricName": "CPI"
}, },
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
}, },
{ {
"BriefDescription": "Total issue-pipeline slots", "BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1", "MetricGroup": "TopDownL1",
"MetricName": "SLOTS" "MetricName": "SLOTS"
}, },
...@@ -49,13 +49,13 @@ ...@@ -49,13 +49,13 @@
}, },
{ {
"BriefDescription": "Instructions Per Cycle (per physical core)", "BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CoreIPC" "MetricName": "CoreIPC"
}, },
{ {
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_DISPATCHED.THREAD / ( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@", "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization", "MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP" "MetricName": "ILP"
}, },
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
}, },
{ {
"BriefDescription": "Giga Floating Point Operations Per Second", "BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE ) / 1000000000 / duration_time", "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary", "MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs" "MetricName": "GFLOPs"
}, },
......
...@@ -9,6 +9,7 @@ GenuineIntel-6-27,v4,bonnell,core ...@@ -9,6 +9,7 @@ GenuineIntel-6-27,v4,bonnell,core
GenuineIntel-6-36,v4,bonnell,core GenuineIntel-6-36,v4,bonnell,core
GenuineIntel-6-35,v4,bonnell,core GenuineIntel-6-35,v4,bonnell,core
GenuineIntel-6-5C,v8,goldmont,core GenuineIntel-6-5C,v8,goldmont,core
GenuineIntel-6-7A,v1,goldmontplus,core
GenuineIntel-6-3C,v24,haswell,core GenuineIntel-6-3C,v24,haswell,core
GenuineIntel-6-45,v24,haswell,core GenuineIntel-6-45,v24,haswell,core
GenuineIntel-6-46,v24,haswell,core GenuineIntel-6-46,v24,haswell,core
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
}, },
{ {
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )", "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
"MetricGroup": "Frontend", "MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization" "MetricName": "IFetch_Line_Utilization"
}, },
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
}, },
{ {
"BriefDescription": "Cycles Per Instruction (threaded)", "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary", "MetricGroup": "Pipeline;Summary",
"MetricName": "CPI" "MetricName": "CPI"
}, },
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
}, },
{ {
"BriefDescription": "Total issue-pipeline slots", "BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1", "MetricGroup": "TopDownL1",
"MetricName": "SLOTS" "MetricName": "SLOTS"
}, },
...@@ -49,13 +49,13 @@ ...@@ -49,13 +49,13 @@
}, },
{ {
"BriefDescription": "Instructions Per Cycle (per physical core)", "BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CoreIPC" "MetricName": "CoreIPC"
}, },
{ {
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_DISPATCHED.THREAD / ( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@", "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization", "MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP" "MetricName": "ILP"
}, },
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
}, },
{ {
"BriefDescription": "Giga Floating Point Operations Per Second", "BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE ) / 1000000000 / duration_time", "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary", "MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs" "MetricName": "GFLOPs"
}, },
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
}, },
{ {
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )", "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
"MetricGroup": "Frontend", "MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization" "MetricName": "IFetch_Line_Utilization"
}, },
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
}, },
{ {
"BriefDescription": "Cycles Per Instruction (threaded)", "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary", "MetricGroup": "Pipeline;Summary",
"MetricName": "CPI" "MetricName": "CPI"
}, },
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
}, },
{ {
"BriefDescription": "Total issue-pipeline slots", "BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1", "MetricGroup": "TopDownL1",
"MetricName": "SLOTS" "MetricName": "SLOTS"
}, },
...@@ -49,19 +49,19 @@ ...@@ -49,19 +49,19 @@
}, },
{ {
"BriefDescription": "Instructions Per Cycle (per physical core)", "BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles", "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CoreIPC" "MetricName": "CoreIPC"
}, },
{ {
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1", "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline;Ports_Utilization", "MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP" "MetricName": "ILP"
}, },
{ {
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END", "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches", "MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost" "MetricName": "BAClear_Cost"
}, },
...@@ -73,19 +73,19 @@ ...@@ -73,19 +73,19 @@
}, },
{ {
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )", "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
"MetricGroup": "Memory_Bound;Memory_Lat", "MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency" "MetricName": "Load_Miss_Real_Latency"
}, },
{ {
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES", "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW", "MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP" "MetricName": "MLP"
}, },
{ {
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles )", "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
"MetricGroup": "TLB", "MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization" "MetricName": "Page_Walks_Utilization"
}, },
...@@ -97,7 +97,7 @@ ...@@ -97,7 +97,7 @@
}, },
{ {
"BriefDescription": "Giga Floating Point Operations Per Second", "BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 / duration_time", "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary", "MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs" "MetricName": "GFLOPs"
}, },
......
...@@ -13,19 +13,19 @@ ...@@ -13,19 +13,19 @@
}, },
{ {
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
"MetricExpr": "min( 1 , UOPS_ISSUED.ANY / (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )", "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
"MetricGroup": "Frontend", "MetricGroup": "Frontend",
"MetricName": "IFetch_Line_Utilization" "MetricName": "IFetch_Line_Utilization"
}, },
{ {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
"MetricGroup": "DSB; Frontend_Bandwidth", "MetricGroup": "DSB; Frontend_Bandwidth",
"MetricName": "DSB_Coverage" "MetricName": "DSB_Coverage"
}, },
{ {
"BriefDescription": "Cycles Per Instruction (threaded)", "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / INST_RETIRED.ANY / cycles", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
"MetricGroup": "Pipeline;Summary", "MetricGroup": "Pipeline;Summary",
"MetricName": "CPI" "MetricName": "CPI"
}, },
...@@ -36,8 +36,8 @@ ...@@ -36,8 +36,8 @@
"MetricName": "CLKS" "MetricName": "CLKS"
}, },
{ {
"BriefDescription": "Total issue-pipeline slots (per-core)", "BriefDescription": "Total issue-pipeline slots",
"MetricExpr": "4*cycles if not #SMT_on else (( CPU_CLK_UNHALTED.THREAD / 2) * (CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK )) if #EBS_Mode else ( CPU_CLK_UNHALTED.THREAD_ANY / 2 )", "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TopDownL1", "MetricGroup": "TopDownL1",
"MetricName": "SLOTS" "MetricName": "SLOTS"
}, },
...@@ -49,25 +49,25 @@ ...@@ -49,25 +49,25 @@
}, },
{ {
"BriefDescription": "Instructions Per Cycle (per physical core)", "BriefDescription": "Instructions Per Cycle (per physical core)",
"MetricExpr": "INST_RETIRED.ANY / cycles if not #SMT_on else (( CPU_CLK_UNHALTED.THREAD / 2) * (CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK )) if #EBS_Mode else ( CPU_CLK_UNHALTED.THREAD_ANY / 2 )", "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CoreIPC" "MetricName": "CoreIPC"
}, },
{ {
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
"MetricExpr": "UOPS_EXECUTED.THREAD / ( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1", "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline;Ports_Utilization", "MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP" "MetricName": "ILP"
}, },
{ {
"BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
"MetricExpr": "( RS_EVENTS.EMPTY_CYCLES - (ICACHE_16B.IFDATA_STALL +2* ICACHE_16B.IFDATA_STALL:c1:e1) - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END", "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
"MetricGroup": "Unknown_Branches", "MetricGroup": "Unknown_Branches",
"MetricName": "BAClear_Cost" "MetricName": "BAClear_Cost"
}, },
{ {
"BriefDescription": "Core actual clocks when any thread is active on the physical core", "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD if not #SMT_on else (( CPU_CLK_UNHALTED.THREAD / 2) * (CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK )) if 1 else ( CPU_CLK_UNHALTED.THREAD_ANY / 2 )", "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT", "MetricGroup": "SMT",
"MetricName": "CORE_CLKS" "MetricName": "CORE_CLKS"
}, },
...@@ -79,34 +79,16 @@ ...@@ -79,34 +79,16 @@
}, },
{ {
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES", "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
"MetricGroup": "Memory_Bound;Memory_BW", "MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP" "MetricName": "MLP"
}, },
{ {
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles if not #SMT_on else (( CPU_CLK_UNHALTED.THREAD / 2) * (CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK )) if #EBS_Mode else ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) )", "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
"MetricGroup": "TLB", "MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization" "MetricName": "Page_Walks_Utilization"
}, },
{
"BriefDescription": "L1 cache miss per kilo instruction for demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS_PS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses;",
"MetricName": "L1MPKI"
},
{
"BriefDescription": "L2 cache miss per kilo instruction for demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS_PS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses;",
"MetricName": "L2MPKI"
},
{
"BriefDescription": "L3 cache miss per kilo instruction for demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS_PS / INST_RETIRED.ANY",
"MetricGroup": "Cache_Misses;",
"MetricName": "L3MPKI"
},
{ {
"BriefDescription": "Average CPU Utilization", "BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
...@@ -115,7 +97,7 @@ ...@@ -115,7 +97,7 @@
}, },
{ {
"BriefDescription": "Giga Floating Point Operations Per Second", "BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16* FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1000000000 / duration_time", "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary", "MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs" "MetricName": "GFLOPs"
}, },
......
...@@ -7,3 +7,4 @@ ret = 1 ...@@ -7,3 +7,4 @@ ret = 1
# events are disabled by default when attached to cpu # events are disabled by default when attached to cpu
disabled=1 disabled=1
enable_on_exec=0 enable_on_exec=0
optional=1
...@@ -4,3 +4,4 @@ args = -e cycles kill >/dev/null 2>&1 ...@@ -4,3 +4,4 @@ args = -e cycles kill >/dev/null 2>&1
ret = 1 ret = 1
[event:base-stat] [event:base-stat]
optional=1
...@@ -32,6 +32,7 @@ config=2 ...@@ -32,6 +32,7 @@ config=2
fd=5 fd=5
type=0 type=0
config=0 config=0
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat] [event6:base-stat]
...@@ -52,15 +53,18 @@ optional=1 ...@@ -52,15 +53,18 @@ optional=1
fd=8 fd=8
type=0 type=0
config=1 config=1
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat] [event9:base-stat]
fd=9 fd=9
type=0 type=0
config=4 config=4
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat] [event10:base-stat]
fd=10 fd=10
type=0 type=0
config=5 config=5
optional=1
...@@ -33,6 +33,7 @@ config=2 ...@@ -33,6 +33,7 @@ config=2
fd=5 fd=5
type=0 type=0
config=0 config=0
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat] [event6:base-stat]
...@@ -53,18 +54,21 @@ optional=1 ...@@ -53,18 +54,21 @@ optional=1
fd=8 fd=8
type=0 type=0
config=1 config=1
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat] [event9:base-stat]
fd=9 fd=9
type=0 type=0
config=4 config=4
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat] [event10:base-stat]
fd=10 fd=10
type=0 type=0
config=5 config=5
optional=1
# PERF_TYPE_HW_CACHE / # PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 | # PERF_COUNT_HW_CACHE_L1D << 0 |
...@@ -74,6 +78,7 @@ config=5 ...@@ -74,6 +78,7 @@ config=5
fd=11 fd=11
type=3 type=3
config=0 config=0
optional=1
# PERF_TYPE_HW_CACHE / # PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 | # PERF_COUNT_HW_CACHE_L1D << 0 |
...@@ -83,6 +88,7 @@ config=0 ...@@ -83,6 +88,7 @@ config=0
fd=12 fd=12
type=3 type=3
config=65536 config=65536
optional=1
# PERF_TYPE_HW_CACHE / # PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_LL << 0 | # PERF_COUNT_HW_CACHE_LL << 0 |
...@@ -92,6 +98,7 @@ config=65536 ...@@ -92,6 +98,7 @@ config=65536
fd=13 fd=13
type=3 type=3
config=2 config=2
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_LL << 0 | # PERF_COUNT_HW_CACHE_LL << 0 |
...@@ -101,3 +108,4 @@ config=2 ...@@ -101,3 +108,4 @@ config=2
fd=14 fd=14
type=3 type=3
config=65538 config=65538
optional=1
...@@ -33,6 +33,7 @@ config=2 ...@@ -33,6 +33,7 @@ config=2
fd=5 fd=5
type=0 type=0
config=0 config=0
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat] [event6:base-stat]
...@@ -53,18 +54,21 @@ optional=1 ...@@ -53,18 +54,21 @@ optional=1
fd=8 fd=8
type=0 type=0
config=1 config=1
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat] [event9:base-stat]
fd=9 fd=9
type=0 type=0
config=4 config=4
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat] [event10:base-stat]
fd=10 fd=10
type=0 type=0
config=5 config=5
optional=1
# PERF_TYPE_HW_CACHE / # PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 | # PERF_COUNT_HW_CACHE_L1D << 0 |
...@@ -74,6 +78,7 @@ config=5 ...@@ -74,6 +78,7 @@ config=5
fd=11 fd=11
type=3 type=3
config=0 config=0
optional=1
# PERF_TYPE_HW_CACHE / # PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 | # PERF_COUNT_HW_CACHE_L1D << 0 |
...@@ -83,6 +88,7 @@ config=0 ...@@ -83,6 +88,7 @@ config=0
fd=12 fd=12
type=3 type=3
config=65536 config=65536
optional=1
# PERF_TYPE_HW_CACHE / # PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_LL << 0 | # PERF_COUNT_HW_CACHE_LL << 0 |
...@@ -92,6 +98,7 @@ config=65536 ...@@ -92,6 +98,7 @@ config=65536
fd=13 fd=13
type=3 type=3
config=2 config=2
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_LL << 0 | # PERF_COUNT_HW_CACHE_LL << 0 |
...@@ -101,6 +108,7 @@ config=2 ...@@ -101,6 +108,7 @@ config=2
fd=14 fd=14
type=3 type=3
config=65538 config=65538
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_L1I << 0 | # PERF_COUNT_HW_CACHE_L1I << 0 |
...@@ -120,6 +128,7 @@ optional=1 ...@@ -120,6 +128,7 @@ optional=1
fd=16 fd=16
type=3 type=3
config=65537 config=65537
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 | # PERF_COUNT_HW_CACHE_DTLB << 0 |
...@@ -129,6 +138,7 @@ config=65537 ...@@ -129,6 +138,7 @@ config=65537
fd=17 fd=17
type=3 type=3
config=3 config=3
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 | # PERF_COUNT_HW_CACHE_DTLB << 0 |
...@@ -138,6 +148,7 @@ config=3 ...@@ -138,6 +148,7 @@ config=3
fd=18 fd=18
type=3 type=3
config=65539 config=65539
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 | # PERF_COUNT_HW_CACHE_ITLB << 0 |
...@@ -147,6 +158,7 @@ config=65539 ...@@ -147,6 +158,7 @@ config=65539
fd=19 fd=19
type=3 type=3
config=4 config=4
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 | # PERF_COUNT_HW_CACHE_ITLB << 0 |
...@@ -156,3 +168,4 @@ config=4 ...@@ -156,3 +168,4 @@ config=4
fd=20 fd=20
type=3 type=3
config=65540 config=65540
optional=1
...@@ -33,6 +33,7 @@ config=2 ...@@ -33,6 +33,7 @@ config=2
fd=5 fd=5
type=0 type=0
config=0 config=0
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat] [event6:base-stat]
...@@ -53,18 +54,21 @@ optional=1 ...@@ -53,18 +54,21 @@ optional=1
fd=8 fd=8
type=0 type=0
config=1 config=1
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat] [event9:base-stat]
fd=9 fd=9
type=0 type=0
config=4 config=4
optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat] [event10:base-stat]
fd=10 fd=10
type=0 type=0
config=5 config=5
optional=1
# PERF_TYPE_HW_CACHE / # PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 | # PERF_COUNT_HW_CACHE_L1D << 0 |
...@@ -74,6 +78,7 @@ config=5 ...@@ -74,6 +78,7 @@ config=5
fd=11 fd=11
type=3 type=3
config=0 config=0
optional=1
# PERF_TYPE_HW_CACHE / # PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 | # PERF_COUNT_HW_CACHE_L1D << 0 |
...@@ -83,6 +88,7 @@ config=0 ...@@ -83,6 +88,7 @@ config=0
fd=12 fd=12
type=3 type=3
config=65536 config=65536
optional=1
# PERF_TYPE_HW_CACHE / # PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_LL << 0 | # PERF_COUNT_HW_CACHE_LL << 0 |
...@@ -92,6 +98,7 @@ config=65536 ...@@ -92,6 +98,7 @@ config=65536
fd=13 fd=13
type=3 type=3
config=2 config=2
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_LL << 0 | # PERF_COUNT_HW_CACHE_LL << 0 |
...@@ -101,6 +108,7 @@ config=2 ...@@ -101,6 +108,7 @@ config=2
fd=14 fd=14
type=3 type=3
config=65538 config=65538
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_L1I << 0 | # PERF_COUNT_HW_CACHE_L1I << 0 |
...@@ -120,6 +128,7 @@ optional=1 ...@@ -120,6 +128,7 @@ optional=1
fd=16 fd=16
type=3 type=3
config=65537 config=65537
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 | # PERF_COUNT_HW_CACHE_DTLB << 0 |
...@@ -129,6 +138,7 @@ config=65537 ...@@ -129,6 +138,7 @@ config=65537
fd=17 fd=17
type=3 type=3
config=3 config=3
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 | # PERF_COUNT_HW_CACHE_DTLB << 0 |
...@@ -138,6 +148,7 @@ config=3 ...@@ -138,6 +148,7 @@ config=3
fd=18 fd=18
type=3 type=3
config=65539 config=65539
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 | # PERF_COUNT_HW_CACHE_ITLB << 0 |
...@@ -147,6 +158,7 @@ config=65539 ...@@ -147,6 +158,7 @@ config=65539
fd=19 fd=19
type=3 type=3
config=4 config=4
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 | # PERF_COUNT_HW_CACHE_ITLB << 0 |
...@@ -156,6 +168,7 @@ config=4 ...@@ -156,6 +168,7 @@ config=4
fd=20 fd=20
type=3 type=3
config=65540 config=65540
optional=1
# PERF_TYPE_HW_CACHE, # PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_L1D << 0 | # PERF_COUNT_HW_CACHE_L1D << 0 |
......
...@@ -5,3 +5,4 @@ ret = 1 ...@@ -5,3 +5,4 @@ ret = 1
[event:base-stat] [event:base-stat]
inherit=0 inherit=0
optional=1
...@@ -13,6 +13,7 @@ libperf-y += find_bit.o ...@@ -13,6 +13,7 @@ libperf-y += find_bit.o
libperf-y += kallsyms.o libperf-y += kallsyms.o
libperf-y += levenshtein.o libperf-y += levenshtein.o
libperf-y += llvm-utils.o libperf-y += llvm-utils.o
libperf-y += mmap.o
libperf-y += memswap.o libperf-y += memswap.o
libperf-y += parse-events.o libperf-y += parse-events.o
libperf-y += perf_regs.o libperf-y += perf_regs.o
......
...@@ -49,10 +49,9 @@ struct arch { ...@@ -49,10 +49,9 @@ struct arch {
void *priv; void *priv;
unsigned int model; unsigned int model;
unsigned int family; unsigned int family;
int (*init)(struct arch *arch); int (*init)(struct arch *arch, char *cpuid);
bool (*ins_is_fused)(struct arch *arch, const char *ins1, bool (*ins_is_fused)(struct arch *arch, const char *ins1,
const char *ins2); const char *ins2);
int (*cpuid_parse)(struct arch *arch, char *cpuid);
struct { struct {
char comment_char; char comment_char;
char skip_functions_char; char skip_functions_char;
...@@ -132,10 +131,10 @@ static struct arch architectures[] = { ...@@ -132,10 +131,10 @@ static struct arch architectures[] = {
}, },
{ {
.name = "x86", .name = "x86",
.init = x86__annotate_init,
.instructions = x86__instructions, .instructions = x86__instructions,
.nr_instructions = ARRAY_SIZE(x86__instructions), .nr_instructions = ARRAY_SIZE(x86__instructions),
.ins_is_fused = x86__ins_is_fused, .ins_is_fused = x86__ins_is_fused,
.cpuid_parse = x86__cpuid_parse,
.objdump = { .objdump = {
.comment_char = '#', .comment_char = '#',
}, },
...@@ -1447,16 +1446,13 @@ int symbol__disassemble(struct symbol *sym, struct map *map, ...@@ -1447,16 +1446,13 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
*parch = arch; *parch = arch;
if (arch->init) { if (arch->init) {
err = arch->init(arch); err = arch->init(arch, cpuid);
if (err) { if (err) {
pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name); pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
return err; return err;
} }
} }
if (arch->cpuid_parse && cpuid)
arch->cpuid_parse(arch, cpuid);
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
symfs_filename, sym->name, map->unmap_ip(map, sym->start), symfs_filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end)); map->unmap_ip(map, sym->end));
......
...@@ -111,50 +111,53 @@ int dump_printf(const char *fmt, ...) ...@@ -111,50 +111,53 @@ int dump_printf(const char *fmt, ...)
return ret; return ret;
} }
static void trace_event_printer(enum binary_printer_ops op, static int trace_event_printer(enum binary_printer_ops op,
unsigned int val, void *extra) unsigned int val, void *extra, FILE *fp)
{ {
const char *color = PERF_COLOR_BLUE; const char *color = PERF_COLOR_BLUE;
union perf_event *event = (union perf_event *)extra; union perf_event *event = (union perf_event *)extra;
unsigned char ch = (unsigned char)val; unsigned char ch = (unsigned char)val;
int printed = 0;
switch (op) { switch (op) {
case BINARY_PRINT_DATA_BEGIN: case BINARY_PRINT_DATA_BEGIN:
printf("."); printed += fprintf(fp, ".");
color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n", printed += color_fprintf(fp, color, "\n. ... raw event: size %d bytes\n",
event->header.size); event->header.size);
break; break;
case BINARY_PRINT_LINE_BEGIN: case BINARY_PRINT_LINE_BEGIN:
printf("."); printed += fprintf(fp, ".");
break; break;
case BINARY_PRINT_ADDR: case BINARY_PRINT_ADDR:
color_fprintf(stdout, color, " %04x: ", val); printed += color_fprintf(fp, color, " %04x: ", val);
break; break;
case BINARY_PRINT_NUM_DATA: case BINARY_PRINT_NUM_DATA:
color_fprintf(stdout, color, " %02x", val); printed += color_fprintf(fp, color, " %02x", val);
break; break;
case BINARY_PRINT_NUM_PAD: case BINARY_PRINT_NUM_PAD:
color_fprintf(stdout, color, " "); printed += color_fprintf(fp, color, " ");
break; break;
case BINARY_PRINT_SEP: case BINARY_PRINT_SEP:
color_fprintf(stdout, color, " "); printed += color_fprintf(fp, color, " ");
break; break;
case BINARY_PRINT_CHAR_DATA: case BINARY_PRINT_CHAR_DATA:
color_fprintf(stdout, color, "%c", printed += color_fprintf(fp, color, "%c",
isprint(ch) ? ch : '.'); isprint(ch) ? ch : '.');
break; break;
case BINARY_PRINT_CHAR_PAD: case BINARY_PRINT_CHAR_PAD:
color_fprintf(stdout, color, " "); printed += color_fprintf(fp, color, " ");
break; break;
case BINARY_PRINT_LINE_END: case BINARY_PRINT_LINE_END:
color_fprintf(stdout, color, "\n"); printed += color_fprintf(fp, color, "\n");
break; break;
case BINARY_PRINT_DATA_END: case BINARY_PRINT_DATA_END:
printf("\n"); printed += fprintf(fp, "\n");
break; break;
default: default:
break; break;
} }
return printed;
} }
void trace_event(union perf_event *event) void trace_event(union perf_event *event)
......
...@@ -33,9 +33,6 @@ ...@@ -33,9 +33,6 @@
#include <linux/log2.h> #include <linux/log2.h>
#include <linux/err.h> #include <linux/err.h>
static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y) #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
...@@ -704,129 +701,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist) ...@@ -704,129 +701,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist)
return perf_evlist__set_paused(evlist, false); return perf_evlist__set_paused(evlist, false);
} }
/* When check_messup is true, 'end' must points to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
u64 end, u64 *prev)
{
unsigned char *data = md->base + page_size;
union perf_event *event = NULL;
int diff = end - start;
if (check_messup) {
/*
* If we're further behind than half the buffer, there's a chance
* the writer will bite our tail and mess up the samples under us.
*
* If we somehow ended up ahead of the 'end', we got messed up.
*
* In either case, truncate and restart at 'end'.
*/
if (diff > md->mask / 2 || diff < 0) {
fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
/*
* 'end' points to a known good entry, start there.
*/
start = end;
diff = 0;
}
}
if (diff >= (int)sizeof(event->header)) {
size_t size;
event = (union perf_event *)&data[start & md->mask];
size = event->header.size;
if (size < sizeof(event->header) || diff < (int)size) {
event = NULL;
goto broken_event;
}
/*
* Event straddles the mmap boundary -- header should always
* be inside due to u64 alignment of output.
*/
if ((start & md->mask) + size != ((start + size) & md->mask)) {
unsigned int offset = start;
unsigned int len = min(sizeof(*event), size), cpy;
void *dst = md->event_copy;
do {
cpy = min(md->mask + 1 - (offset & md->mask), len);
memcpy(dst, &data[offset & md->mask], cpy);
offset += cpy;
dst += cpy;
len -= cpy;
} while (len);
event = (union perf_event *) md->event_copy;
}
start += size;
}
broken_event:
if (prev)
*prev = start;
return event;
}
union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
u64 head;
u64 old = md->prev;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
if (!refcount_read(&md->refcnt))
return NULL;
head = perf_mmap__read_head(md);
return perf_mmap__read(md, check_messup, old, head, &md->prev);
}
union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
u64 head, end;
u64 start = md->prev;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
if (!refcount_read(&md->refcnt))
return NULL;
head = perf_mmap__read_head(md);
if (!head)
return NULL;
/*
* 'head' pointer starts from 0. Kernel minus sizeof(record) form
* it each time when kernel writes to it, so in fact 'head' is
* negative. 'end' pointer is made manually by adding the size of
* the ring buffer to 'head' pointer, means the validate data can
* read is the whole ring buffer. If 'end' is positive, the ring
* buffer has not fully filled, so we must adjust 'end' to 0.
*
* However, since both 'head' and 'end' is unsigned, we can't
* simply compare 'end' against 0. Here we compare '-head' and
* the size of the ring buffer, where -head is the number of bytes
* kernel write to the ring buffer.
*/
if (-head < (u64)(md->mask + 1))
end = 0;
else
end = head + md->mask + 1;
return perf_mmap__read(md, false, start, end, &md->prev);
}
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx) union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{ {
struct perf_mmap *md = &evlist->mmap[idx]; struct perf_mmap *md = &evlist->mmap[idx];
...@@ -857,96 +731,16 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) ...@@ -857,96 +731,16 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
return perf_evlist__mmap_read_forward(evlist, idx); return perf_evlist__mmap_read_forward(evlist, idx);
} }
void perf_mmap__read_catchup(struct perf_mmap *md)
{
u64 head;
if (!refcount_read(&md->refcnt))
return;
head = perf_mmap__read_head(md);
md->prev = head;
}
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx) void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{ {
perf_mmap__read_catchup(&evlist->mmap[idx]); perf_mmap__read_catchup(&evlist->mmap[idx]);
} }
static bool perf_mmap__empty(struct perf_mmap *md)
{
return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}
static void perf_mmap__get(struct perf_mmap *map)
{
refcount_inc(&map->refcnt);
}
static void perf_mmap__put(struct perf_mmap *md)
{
BUG_ON(md->base && refcount_read(&md->refcnt) == 0);
if (refcount_dec_and_test(&md->refcnt))
perf_mmap__munmap(md);
}
void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
if (!overwrite) {
u64 old = md->prev;
perf_mmap__write_tail(md, old);
}
if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
perf_mmap__put(md);
}
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{ {
perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite); perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
} }
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
struct auxtrace_mmap_params *mp __maybe_unused,
void *userpg __maybe_unused,
int fd __maybe_unused)
{
return 0;
}
void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}
void __weak auxtrace_mmap_params__init(
struct auxtrace_mmap_params *mp __maybe_unused,
off_t auxtrace_offset __maybe_unused,
unsigned int auxtrace_pages __maybe_unused,
bool auxtrace_overwrite __maybe_unused)
{
}
void __weak auxtrace_mmap_params__set_idx(
struct auxtrace_mmap_params *mp __maybe_unused,
struct perf_evlist *evlist __maybe_unused,
int idx __maybe_unused,
bool per_cpu __maybe_unused)
{
}
static void perf_mmap__munmap(struct perf_mmap *map)
{
if (map->base != NULL) {
munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL;
map->fd = -1;
refcount_set(&map->refcnt, 0);
}
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
static void perf_evlist__munmap_nofree(struct perf_evlist *evlist) static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{ {
int i; int i;
...@@ -995,48 +789,6 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist) ...@@ -995,48 +789,6 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
return map; return map;
} }
struct mmap_params {
int prot;
int mask;
struct auxtrace_mmap_params auxtrace_mp;
};
static int perf_mmap__mmap(struct perf_mmap *map,
struct mmap_params *mp, int fd)
{
/*
* The last one will be done at perf_evlist__mmap_consume(), so that we
* make sure we don't prevent tools from consuming every last event in
* the ring buffer.
*
* I.e. we can get the POLLHUP meaning that the fd doesn't exist
* anymore, but the last events for it are still in the ring buffer,
* waiting to be consumed.
*
* Tools can chose to ignore this at their own discretion, but the
* evlist layer can't just drop it when filtering events in
* perf_evlist__filter_pollfd().
*/
refcount_set(&map->refcnt, 2);
map->prev = 0;
map->mask = mp->mask;
map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
MAP_SHARED, fd, 0);
if (map->base == MAP_FAILED) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
errno);
map->base = NULL;
return -1;
}
map->fd = fd;
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
return -1;
return 0;
}
static bool static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused, perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
struct perf_evsel *evsel) struct perf_evsel *evsel)
......
...@@ -11,8 +11,8 @@ ...@@ -11,8 +11,8 @@
#include "../perf.h" #include "../perf.h"
#include "event.h" #include "event.h"
#include "evsel.h" #include "evsel.h"
#include "mmap.h"
#include "util.h" #include "util.h"
#include "auxtrace.h"
#include <signal.h> #include <signal.h>
#include <unistd.h> #include <unistd.h>
...@@ -24,55 +24,6 @@ struct record_opts; ...@@ -24,55 +24,6 @@ struct record_opts;
#define PERF_EVLIST__HLIST_BITS 8 #define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
/**
* struct perf_mmap - perf's ring buffer mmap details
*
* @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
*/
struct perf_mmap {
void *base;
int mask;
int fd;
refcount_t refcnt;
u64 prev;
struct auxtrace_mmap auxtrace_mmap;
char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};
static inline size_t
perf_mmap__mmap_len(struct perf_mmap *map)
{
return map->mask + 1 + page_size;
}
/*
* State machine of bkw_mmap_state:
*
* .________________(forbid)_____________.
* | V
* NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
* ^ ^ | ^ |
* | |__(forbid)____/ |___(forbid)___/|
* | |
* \_________________(3)_______________/
*
* NOTREADY : Backward ring buffers are not ready
* RUNNING : Backward ring buffers are recording
* DATA_PENDING : We are required to collect data from backward ring buffers
* EMPTY : We have collected data from backward ring buffers.
*
* (0): Setup backward ring buffer
* (1): Pause ring buffers for reading
* (2): Read from ring buffers
* (3): Resume ring buffers for recording
*/
enum bkw_mmap_state {
BKW_MMAP_NOTREADY,
BKW_MMAP_RUNNING,
BKW_MMAP_DATA_PENDING,
BKW_MMAP_EMPTY,
};
struct perf_evlist { struct perf_evlist {
struct list_head entries; struct list_head entries;
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
...@@ -177,12 +128,6 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id); ...@@ -177,12 +128,6 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state); void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);
union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
void perf_mmap__read_catchup(struct perf_mmap *md);
void perf_mmap__consume(struct perf_mmap *md, bool overwrite);
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx); union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
...@@ -286,25 +231,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); ...@@ -286,25 +231,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size); int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size); int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->base;
u64 head = ACCESS_ONCE(pc->data_head);
rmb();
return head;
}
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
struct perf_event_mmap_page *pc = md->base;
/*
* ensure all reads are done before we write the tail out.
*/
mb();
pc->data_tail = tail;
}
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str); bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
void perf_evlist__to_front(struct perf_evlist *evlist, void perf_evlist__to_front(struct perf_evlist *evlist,
struct perf_evsel *move_evsel); struct perf_evsel *move_evsel);
......
/*
* Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
* copyright notes.
*
* Released under the GPL v2. (and only v2, not any later version)
*/
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */
/*
 * Total length of the mmap'd region: the data ring (mask + 1 bytes, mask
 * being size-minus-one of a power-of-two buffer) plus one extra page for
 * the perf_event_mmap_page control header that precedes the data area.
 */
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	int ring_bytes = map->mask + 1;

	return ring_bytes + page_size;
}
/*
 * Read one event from the ring buffer window [start, end).
 *
 * Returns a pointer to the next complete event, or NULL when none is
 * available.  On return, *prev (when non-NULL) is advanced past the
 * consumed event so successive calls walk the buffer.
 *
 * When check_messup is true, 'end' must point to a known-good entry: if the
 * reader fell too far behind the kernel writer, reading restarts at 'end'.
 */
static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
					 u64 start, u64 end, u64 *prev)
{
	/* The data area starts one page after the control header page. */
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > map->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & map->mask];
		size = event->header.size;

		/* Bail out on a malformed header or a partially written event. */
		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & map->mask) + size != ((start + size) & map->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			/* Copy the wrapped event piecewise into event_copy. */
			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}
/*
 * Read the next event from a forward (non-overwrite) ring buffer,
 * advancing map->prev past the returned event.
 */
union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
{
	u64 head, old;

	/* Bail out if the buffer was unmapped on POLLHUP/POLLERR. */
	if (!refcount_read(&map->refcnt))
		return NULL;

	old = map->prev;
	head = perf_mmap__read_head(map);

	return perf_mmap__read(map, check_messup, old, head, &map->prev);
}
/*
 * Read the next event from an overwrite ("backward") ring buffer,
 * walking from the position saved in map->prev. Returns NULL when the
 * buffer is gone, empty, or exhausted.
 */
union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
{
	u64 head, end;
	u64 start = map->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);

	/* A head of 0 means the kernel has written nothing yet. */
	if (!head)
		return NULL;

	/*
	 * 'head' starts at 0 and the kernel subtracts sizeof(record) from
	 * it on each write, so in fact 'head' is negative. 'end' is made
	 * manually by adding the ring buffer size to 'head', meaning the
	 * valid data to read is the whole ring buffer. If 'end' would be
	 * positive, the ring buffer has not fully filled, so 'end' must
	 * be adjusted to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0. Instead we compare '-head' with
	 * the size of the ring buffer, where -head is the number of bytes
	 * the kernel has written into the ring buffer.
	 */
	if (-head < (u64)(map->mask + 1))
		end = 0;
	else
		end = head + map->mask + 1;

	return perf_mmap__read(map, false, start, end, &map->prev);
}
/*
 * Discard everything pending and resynchronize the reader with the
 * current kernel write position.
 */
void perf_mmap__read_catchup(struct perf_mmap *map)
{
	/* Nothing to do if the buffer has already been unmapped. */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}
static bool perf_mmap__empty(struct perf_mmap *map)
{
return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}
/* Take an additional reference on the ring buffer. */
void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}
/* Drop a reference; the last put unmaps the ring buffer. */
void perf_mmap__put(struct perf_mmap *map)
{
	/* A still-mapped buffer must hold at least one reference. */
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}
/*
 * Mark the current read position as consumed. For non-overwrite buffers
 * this publishes data_tail back to the kernel; when the map is down to
 * its final reference and drained, release it.
 */
void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
	if (!overwrite)
		perf_mmap__write_tail(map, map->prev);

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}
/* Weak default: no AUX area tracing support, report success without mapping. */
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}
/* Weak default: nothing to unmap when AUX area tracing is unsupported. */
void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}
/* Weak default: no AUX parameters to initialize without tracing support. */
void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}
/* Weak default: no per-index AUX parameters without tracing support. */
void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}
/*
 * Tear down the ring buffer mapping and its AUX area mmap, resetting the
 * map state so a stale pointer cannot be reused.
 */
void perf_mmap__munmap(struct perf_mmap *map)
{
	void *base = map->base;

	if (base != NULL) {
		munmap(base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}

	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
/*
 * Map the perf event ring buffer for 'fd' using the protection and size
 * parameters in 'mp', then attach the AUX area mmap if supported.
 * Returns 0 on success, -1 on failure.
 */
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	/*
	 * NOTE(review): on auxtrace_mmap__mmap() failure the data mapping set
	 * up above is left in place -- presumably the caller unwinds via
	 * perf_mmap__munmap(); verify against callers.
	 */
	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}
/*
 * Determine the [*start, *end) range of valid records in an overwrite
 * ring buffer by walking record headers forward from 'head' until either
 * a zero-sized header (end of written data) or a full buffer's worth of
 * bytes has been covered. Returns 0 on success.
 */
static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);

	pheader = (struct perf_event_header *)(buf + (head & mask));

	*start = head;

	while (true) {
		/*
		 * Covered a full buffer: stop, stepping back over the last
		 * record if we overshot the buffer size.
		 */
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading backward ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		/* A zero-sized header marks the end of the written data. */
		if (pheader->size == 0) {
			pr_debug("Finished reading backward ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}
/*
 * Compute the [*start, *end) range of data to consume. Forward buffers
 * simply run from the last consumed position to the current head;
 * overwrite buffers need the headers walked to find valid records.
 */
static int rb_find_range(void *data, int mask, u64 head, u64 old,
			 u64 *start, u64 *end, bool backward)
{
	if (backward)
		return backward_rb_find_range(data, mask, head, start, end);

	*start = old;
	*end = head;
	return 0;
}
/*
 * Drain all currently available data from ring buffer 'md' through the
 * caller-supplied push() callback, splitting the copy in two when the
 * data wraps around the end of the buffer. Returns 0 on success (or when
 * the writer lapped us and the chunk was dropped), -1 on push() failure.
 */
int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
		    void *to, int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
		return -1;

	if (start == end)
		return 0;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		/* The writer lapped us: drop this chunk, resync at 'head'. */
		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

		md->prev = head;
		perf_mmap__consume(md, overwrite || backward);
		return 0;
	}

	/*
	 * Range wraps past the end of the buffer: push the segment up to
	 * the wrap point first, then fall through for the remainder.
	 */
	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;
	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite || backward);
out:
	return rc;
}
#ifndef __PERF_MMAP_H
#define __PERF_MMAP_H 1
#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <stdbool.h>
#include "auxtrace.h"
#include "event.h"
/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 */
struct perf_mmap {
	void		 *base;		/* mmap'ed area: control page followed by data pages */
	int		 mask;		/* data-area size minus one, used to wrap offsets */
	int		 fd;		/* perf event fd this buffer was mapped from */
	refcount_t	 refcnt;	/* see @refcnt above */
	u64		 prev;		/* last position consumed by the reader */
	struct auxtrace_mmap auxtrace_mmap;	/* associated AUX area mmap state */
	/* bounce buffer used to linearize events that wrap the buffer end */
	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};
/*
* State machine of bkw_mmap_state:
*
* .________________(forbid)_____________.
* | V
* NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
* ^ ^ | ^ |
* | |__(forbid)____/ |___(forbid)___/|
* | |
* \_________________(3)_______________/
*
* NOTREADY : Backward ring buffers are not ready
* RUNNING : Backward ring buffers are recording
* DATA_PENDING : We are required to collect data from backward ring buffers
* EMPTY : We have collected data from backward ring buffers.
*
* (0): Setup backward ring buffer
* (1): Pause ring buffers for reading
* (2): Read from ring buffers
* (3): Resume ring buffers for recording
*/
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,	/* backward ring buffers are not ready */
	BKW_MMAP_RUNNING,	/* backward ring buffers are recording */
	BKW_MMAP_DATA_PENDING,	/* data must be collected from backward ring buffers */
	BKW_MMAP_EMPTY,		/* data has been collected from backward ring buffers */
};
/* Parameters passed to perf_mmap__mmap(). */
struct mmap_params {
	int prot, mask;		/* mmap protection flags and data-size mask */
	struct auxtrace_mmap_params auxtrace_mp;	/* AUX area mmap parameters */
};
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd);
void perf_mmap__munmap(struct perf_mmap *map);
void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);
void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
void perf_mmap__read_catchup(struct perf_mmap *md);
/*
 * Read the kernel-maintained write position (data_head) from the control
 * page. The rmb() orders subsequent reads of the data pages after this
 * load of data_head.
 */
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	u64 head = ACCESS_ONCE(pc->data_head);

	rmb();

	return head;
}
/*
 * Publish the reader's consumed position (data_tail) back to the kernel
 * via the control page.
 */
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	mb();
	pc->data_tail = tail;
}
union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
void *to, int push(void *to, void *buf, size_t size));
size_t perf_mmap__mmap_len(struct perf_mmap *map);
#endif /*__PERF_MMAP_H */
...@@ -9,9 +9,10 @@ ...@@ -9,9 +9,10 @@
#ifndef __PERF_NAMESPACES_H #ifndef __PERF_NAMESPACES_H
#define __PERF_NAMESPACES_H #define __PERF_NAMESPACES_H
#include "../perf.h" #include <sys/types.h>
#include <linux/list.h> #include <linux/perf_event.h>
#include <linux/refcount.h> #include <linux/refcount.h>
#include <linux/types.h>
struct namespaces_event; struct namespaces_event;
......
...@@ -2,40 +2,42 @@ ...@@ -2,40 +2,42 @@
#include <linux/log2.h> #include <linux/log2.h>
#include "sane_ctype.h" #include "sane_ctype.h"
void print_binary(unsigned char *data, size_t len, int binary__fprintf(unsigned char *data, size_t len,
size_t bytes_per_line, print_binary_t printer, size_t bytes_per_line, binary__fprintf_t printer,
void *extra) void *extra, FILE *fp)
{ {
size_t i, j, mask; size_t i, j, mask;
int printed = 0;
if (!printer) if (!printer)
return; return 0;
bytes_per_line = roundup_pow_of_two(bytes_per_line); bytes_per_line = roundup_pow_of_two(bytes_per_line);
mask = bytes_per_line - 1; mask = bytes_per_line - 1;
printer(BINARY_PRINT_DATA_BEGIN, 0, extra); printed += printer(BINARY_PRINT_DATA_BEGIN, 0, extra, fp);
for (i = 0; i < len; i++) { for (i = 0; i < len; i++) {
if ((i & mask) == 0) { if ((i & mask) == 0) {
printer(BINARY_PRINT_LINE_BEGIN, -1, extra); printed += printer(BINARY_PRINT_LINE_BEGIN, -1, extra, fp);
printer(BINARY_PRINT_ADDR, i, extra); printed += printer(BINARY_PRINT_ADDR, i, extra, fp);
} }
printer(BINARY_PRINT_NUM_DATA, data[i], extra); printed += printer(BINARY_PRINT_NUM_DATA, data[i], extra, fp);
if (((i & mask) == mask) || i == len - 1) { if (((i & mask) == mask) || i == len - 1) {
for (j = 0; j < mask-(i & mask); j++) for (j = 0; j < mask-(i & mask); j++)
printer(BINARY_PRINT_NUM_PAD, -1, extra); printed += printer(BINARY_PRINT_NUM_PAD, -1, extra, fp);
printer(BINARY_PRINT_SEP, i, extra); printer(BINARY_PRINT_SEP, i, extra, fp);
for (j = i & ~mask; j <= i; j++) for (j = i & ~mask; j <= i; j++)
printer(BINARY_PRINT_CHAR_DATA, data[j], extra); printed += printer(BINARY_PRINT_CHAR_DATA, data[j], extra, fp);
for (j = 0; j < mask-(i & mask); j++) for (j = 0; j < mask-(i & mask); j++)
printer(BINARY_PRINT_CHAR_PAD, i, extra); printed += printer(BINARY_PRINT_CHAR_PAD, i, extra, fp);
printer(BINARY_PRINT_LINE_END, -1, extra); printed += printer(BINARY_PRINT_LINE_END, -1, extra, fp);
} }
} }
printer(BINARY_PRINT_DATA_END, -1, extra); printed += printer(BINARY_PRINT_DATA_END, -1, extra, fp);
return printed;
} }
int is_printable_array(char *p, unsigned int len) int is_printable_array(char *p, unsigned int len)
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#define PERF_PRINT_BINARY_H #define PERF_PRINT_BINARY_H
#include <stddef.h> #include <stddef.h>
#include <stdio.h>
enum binary_printer_ops { enum binary_printer_ops {
BINARY_PRINT_DATA_BEGIN, BINARY_PRINT_DATA_BEGIN,
...@@ -16,12 +17,19 @@ enum binary_printer_ops { ...@@ -16,12 +17,19 @@ enum binary_printer_ops {
BINARY_PRINT_DATA_END, BINARY_PRINT_DATA_END,
}; };
typedef void (*print_binary_t)(enum binary_printer_ops op, typedef int (*binary__fprintf_t)(enum binary_printer_ops op,
unsigned int val, void *extra); unsigned int val, void *extra, FILE *fp);
void print_binary(unsigned char *data, size_t len, int binary__fprintf(unsigned char *data, size_t len,
size_t bytes_per_line, print_binary_t printer, size_t bytes_per_line, binary__fprintf_t printer,
void *extra); void *extra, FILE *fp);
static inline void print_binary(unsigned char *data, size_t len,
size_t bytes_per_line, binary__fprintf_t printer,
void *extra)
{
binary__fprintf(data, len, bytes_per_line, printer, extra, stdout);
}
int is_printable_array(char *p, unsigned int len); int is_printable_array(char *p, unsigned int len);
......
...@@ -10,6 +10,7 @@ util/ctype.c ...@@ -10,6 +10,7 @@ util/ctype.c
util/evlist.c util/evlist.c
util/evsel.c util/evsel.c
util/cpumap.c util/cpumap.c
util/mmap.c
util/namespaces.c util/namespaces.c
../lib/bitmap.c ../lib/bitmap.c
../lib/find_bit.c ../lib/find_bit.c
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment