Commit 6d499a6b authored by Namhyung Kim, committed by Arnaldo Carvalho de Melo

perf lock: Print the number of lost entries for BPF

Like the normal 'perf lock contention' output, print the number of lost
entries for BPF when it is non-zero or when the -v option is given.

Currently it uses BROKEN_CONTENDED stat for the lost count (due to full
stack maps).

  $ sudo perf lock con -a -b --map-nr-entries 128 sleep 5
  ...
  === output for debug===

  bad: 43, total: 14903
  bad rate: 0.29 %
  histogram of events caused bad sequence
      acquire: 0
     acquired: 0
    contended: 43
      release: 0
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Blake Jones <blakejones@google.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20220802191004.347740-3-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent ceb13bfc
@@ -1472,8 +1472,11 @@ static void print_contention_result(void)
 	pr_info(" %10s %s\n\n", "type", "caller");
 	bad = total = 0;
+	if (use_bpf)
+		bad = bad_hist[BROKEN_CONTENDED];
 	while ((st = pop_from_result())) {
-		total++;
+		total += use_bpf ? st->nr_contended : 1;
 		if (st->broken)
 			bad++;
@@ -1687,6 +1690,9 @@ static int __cmd_contention(int argc, const char **argv)
 		lock_contention_stop();
 		lock_contention_read(&con);
+		/* abuse bad hist stats for lost entries */
+		bad_hist[BROKEN_CONTENDED] = con.lost;
 	} else {
 		err = perf_session__process_events(session);
 		if (err)
...
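A minimal standalone sketch, not the perf source, of how the numbers in the debug output above fall out of this change: with use_bpf, `bad` is seeded from the lost count (bad_hist[BROKEN_CONTENDED], i.e. con.lost) before the result loop and `total` sums st->nr_contended per lock, so the bad rate is simply bad/total. The values below are copied from the sample run.

/*
 * Sketch only, not builtin-lock.c: reproduces the arithmetic behind the
 * "bad rate" line of the debug output using the sample run's numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned long lost  = 43;	/* con.lost -> bad_hist[BROKEN_CONTENDED] */
	unsigned long bad   = lost;	/* seeded before the pop_from_result() loop */
	unsigned long total = 14903;	/* sum of st->nr_contended over all locks */

	printf("bad: %lu, total: %lu\n", bad, total);
	printf("bad rate: %.2f %%\n", (double)bad / total * 100);
	return 0;
}

Compiled and run, this prints "bad: 43, total: 14903" and "bad rate: 0.29 %", matching the sample output in the commit message.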
@@ -16,7 +16,7 @@ static struct lock_contention_bpf *skel;
 /* should be same as bpf_skel/lock_contention.bpf.c */
 struct lock_contention_key {
-	u32 stack_id;
+	s32 stack_id;
 };
 struct lock_contention_data {
@@ -110,7 +110,7 @@ int lock_contention_stop(void)
 int lock_contention_read(struct lock_contention *con)
 {
 	int fd, stack;
-	u32 prev_key, key;
+	s32 prev_key, key;
 	struct lock_contention_data data;
 	struct lock_stat *st;
 	struct machine *machine = con->machine;
@@ -119,6 +119,8 @@ int lock_contention_read(struct lock_contention *con)
 	fd = bpf_map__fd(skel->maps.lock_stat);
 	stack = bpf_map__fd(skel->maps.stacks);
+	con->lost = skel->bss->lost;
 	prev_key = 0;
 	while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
 		struct map *kmap;
...
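The con->lost read above relies on how libbpf handles BPF global variables: uninitialized globals such as `lost` land in the object's .bss section, which libbpf memory-maps and the generated skeleton exposes as skel->bss. A hedged sketch of the pattern, assuming a skeleton header generated with `bpftool gen skeleton` (the helper name read_lost_count is only illustrative):

/*
 * Sketch only, not the full bpf_lock_contention.c.  Assumes
 * lock_contention.skel.h was produced by `bpftool gen skeleton` from the
 * compiled BPF object; skel->bss mirrors the program's .bss globals, so
 * userspace can read the counter directly, without any map lookup.
 */
#include "lock_contention.skel.h"

/* illustrative helper: same idea as con->lost = skel->bss->lost above */
static unsigned long read_lost_count(struct lock_contention_bpf *skel)
{
	return skel->bss->lost;
}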
@@ -12,7 +12,7 @@
 #define MAX_ENTRIES 10240
 struct contention_key {
-	__u32 stack_id;
+	__s32 stack_id;
 };
 struct contention_data {
@@ -27,7 +27,7 @@ struct tstamp_data {
 	__u64 timestamp;
 	__u64 lock;
 	__u32 flags;
-	__u32 stack_id;
+	__s32 stack_id;
 };
 /* callstack storage */
@@ -73,6 +73,9 @@ int enabled;
 int has_cpu;
 int has_task;
+/* error stat */
+unsigned long lost;
 static inline int can_record(void)
 {
 	if (has_cpu) {
@@ -116,6 +119,8 @@ int contention_begin(u64 *ctx)
 	pelem->flags = (__u32)ctx[1];
 	pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);
+	if (pelem->stack_id < 0)
+		lost++;
 	return 0;
 }
...
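The stack_id fields change from __u32 to __s32 because bpf_get_stackid() returns a negative errno when it cannot store the callstack (for example when the stacks map is full), and that sign must survive the `< 0` check in contention_begin(). A cut-down sketch of the counting pattern, not the full lock_contention.bpf.c; the section name and map sizes are assumptions for illustration, with max_entries set to 128 to mirror the --map-nr-entries 128 run above:

/*
 * Cut-down sketch of the lost-entry accounting, not the real BPF program.
 * bpf_get_stackid() returns a non-negative stack id on success and a
 * negative errno (e.g. -EEXIST or -ENOMEM) when the stack map cannot take
 * a new entry, so the id must be signed and each failure bumps `lost`.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, 8 * sizeof(__u64));
	__uint(max_entries, 128);	/* deliberately small, like --map-nr-entries 128 */
} stacks SEC(".maps");

unsigned long lost;			/* .bss global, read as skel->bss->lost */

SEC("tp_btf/contention_begin")
int contention_begin(u64 *ctx)
{
	__s32 stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);

	if (stack_id < 0)
		lost++;			/* callstack could not be stored */
	return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";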
@@ -113,6 +113,7 @@ struct lock_contention {
 	struct machine *machine;
 	struct hlist_head *result;
 	unsigned long map_nr_entries;
+	unsigned long lost;
 };
 #ifdef HAVE_BPF_SKEL
...