Commit 1862a69c authored by Linus Torvalds

Merge tag 'perf-tools-fixes-for-v5.18-2022-04-09' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull perf tools fixes from Arnaldo Carvalho de Melo:

 - Fix the clang command line option probing and remove some options to
   filter out, fixing the build with the latest clang versions

 - Fix 'perf bench' futex and epoll benchmarks to deal with machines
   with more than 1K CPUs

 - Fix 'perf test tsc' error message when not supported

 - Remap perf ring buffer if there is no space for event, fixing perf
   usage in 32-bit ChromeOS

 - Drop objdump stderr to avoid getting stuck waiting for stdout output
   in 'perf annotate'

 - Fix up garbled output by not showing unwind error messages when
   augmenting the frame pointer stack in best effort mode

 - Fix perf's libperf_print callback, use the va_list eprintf() variant

 - Sync vhost and arm64 cputype headers with the kernel sources

 - Fix 'perf report --mem-mode' with ARM SPE

 - Add missing external commands ('iostat', etc) to 'perf --list-cmds'

* tag 'perf-tools-fixes-for-v5.18-2022-04-09' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf annotate: Drop objdump stderr to avoid getting stuck waiting for stdout output
  perf tools: Add external commands to list-cmds
  perf docs: Add perf-iostat link to manpages
  perf session: Remap buf if there is no space for event
  perf bench: Fix epoll bench to correct usage of affinity for machines with #CPUs > 1K
  perf bench: Fix futex bench to correct usage of affinity for machines with #CPUs > 1K
  perf tools: Fix perf's libperf_print callback
  perf: arm-spe: Fix perf report --mem-mode
  perf unwind: Don't show unwind error messages when augmenting frame pointer stack
  tools headers arm64: Sync arm64's cputype.h with the kernel sources
  perf test tsc: Fix error message when not supported
  perf build: Don't use -ffat-lto-objects in the python feature test when building with clang-13
  perf python: Fix probing for some clang command line options
  tools build: Filter out options and warnings not supported by clang
  tools build: Use $(shell ) instead of `` to get embedded libperl's ccopts
  tools include UAPI: Sync linux/vhost.h with the kernel sources
parents 94a4c2bb 940a445a
@@ -75,6 +75,7 @@
 #define ARM_CPU_PART_CORTEX_A77		0xD0D
 #define ARM_CPU_PART_NEOVERSE_V1	0xD40
 #define ARM_CPU_PART_CORTEX_A78		0xD41
+#define ARM_CPU_PART_CORTEX_A78AE	0xD42
 #define ARM_CPU_PART_CORTEX_X1		0xD44
 #define ARM_CPU_PART_CORTEX_A510	0xD46
 #define ARM_CPU_PART_CORTEX_A710	0xD47
@@ -130,6 +131,7 @@
 #define MIDR_CORTEX_A77	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
 #define MIDR_NEOVERSE_V1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
 #define MIDR_CORTEX_A78	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_A78AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
 #define MIDR_CORTEX_X1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
......
@@ -217,9 +217,16 @@ strip-libs = $(filter-out -l%,$(1))
 PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
 PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
 PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
-PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
 FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
 
+ifeq ($(CC_NO_CLANG), 0)
+  PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
+  PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS))
+  PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
+  FLAGS_PERL_EMBED += -Wno-compound-token-split-by-macro
+endif
+
 $(OUTPUT)test-libperl.bin:
 	$(BUILD) $(FLAGS_PERL_EMBED)
......
@@ -150,4 +150,11 @@
 /* Get the valid iova range */
 #define VHOST_VDPA_GET_IOVA_RANGE	_IOR(VHOST_VIRTIO, 0x78, \
 					     struct vhost_vdpa_iova_range)
+
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE	_IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT	_IOR(VHOST_VIRTIO, 0x80, __u32)
+
 #endif
@@ -83,7 +83,7 @@ linkperf:perf-buildid-list[1], linkperf:perf-c2c[1],
 linkperf:perf-config[1], linkperf:perf-data[1], linkperf:perf-diff[1],
 linkperf:perf-evlist[1], linkperf:perf-ftrace[1],
 linkperf:perf-help[1], linkperf:perf-inject[1],
-linkperf:perf-intel-pt[1], linkperf:perf-kallsyms[1],
+linkperf:perf-intel-pt[1], linkperf:perf-iostat[1], linkperf:perf-kallsyms[1],
 linkperf:perf-kmem[1], linkperf:perf-kvm[1], linkperf:perf-lock[1],
 linkperf:perf-mem[1], linkperf:perf-probe[1], linkperf:perf-sched[1],
 linkperf:perf-script[1], linkperf:perf-test[1],
......
@@ -272,6 +272,9 @@ ifdef PYTHON_CONFIG
   PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
   PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
   FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
+  ifeq ($(CC_NO_CLANG), 0)
+    PYTHON_EMBED_CCOPTS := $(filter-out -ffat-lto-objects, $(PYTHON_EMBED_CCOPTS))
+  endif
 endif
 
 FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
@@ -790,6 +793,9 @@ else
       LDFLAGS += $(PERL_EMBED_LDFLAGS)
       EXTLIBS += $(PERL_EMBED_LIBADD)
       CFLAGS += -DHAVE_LIBPERL_SUPPORT
+      ifeq ($(CC_NO_CLANG), 0)
+        CFLAGS += -Wno-compound-token-split-by-macro
+      endif
       $(call detected,CONFIG_LIBPERL)
     endif
   endif
......
@@ -239,6 +239,12 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
 		arm_spe_set_timestamp(itr, arm_spe_evsel);
 	}
 
+	/*
+	 * Set this only so that perf report knows that SPE generates memory info. It has no effect
+	 * on the opening of the event or the SPE data produced.
+	 */
+	evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
+
 	/* Add dummy event to keep tracking */
 	err = parse_events(evlist, "dummy:u", NULL);
 	if (err)
......
@@ -222,13 +222,20 @@ static void init_fdmaps(struct worker *w, int pct)
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
 	pthread_attr_t thread_attr, *attrp = NULL;
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i, j;
 	int ret = 0;
+	int nrcpus;
+	size_t size;
 
 	if (!noaffinity)
 		pthread_attr_init(&thread_attr);
 
+	nrcpus = perf_cpu_map__nr(cpu);
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	for (i = 0; i < nthreads; i++) {
 		struct worker *w = &worker[i];
 
@@ -252,22 +259,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 			init_fdmaps(w, 50);
 
 		if (!noaffinity) {
-			CPU_ZERO(&cpuset);
-			CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+			CPU_ZERO_S(size, cpuset);
+			CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+					size, cpuset);
 
-			ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-			if (ret)
+			ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+			if (ret) {
+				CPU_FREE(cpuset);
 				err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+			}
 
 			attrp = &thread_attr;
 		}
 
 		ret = pthread_create(&w->thread, attrp, workerfn,
 				     (void *)(struct worker *) w);
-		if (ret)
+		if (ret) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
 
+	CPU_FREE(cpuset);
 	if (!noaffinity)
 		pthread_attr_destroy(&thread_attr);
......
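
Editor's note: the affinity fix above (repeated in the other 'perf bench' futex/epoll files below) follows the glibc pattern for CPU sets that may exceed CPU_SETSIZE (1024 CPUs): allocate the set dynamically with CPU_ALLOC(), use the *_S macros, and pass CPU_ALLOC_SIZE() bytes to pthread_attr_setaffinity_np(). A minimal standalone sketch of that pattern, not the perf code itself; the worker function and the pinned CPU index are made up for illustration:

#define _GNU_SOURCE
#include <err.h>
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>
#include <unistd.h>

static void *workerfn(void *arg) { (void)arg; return NULL; }	/* placeholder worker */

int main(void)
{
	int nrcpus = (int)sysconf(_SC_NPROCESSORS_CONF);	/* may be > 1024 */
	cpu_set_t *cpuset = CPU_ALLOC(nrcpus);			/* heap-allocated, sized for nrcpus */
	size_t size = CPU_ALLOC_SIZE(nrcpus);			/* bytes actually used by the set */
	pthread_attr_t attr;
	pthread_t tid;

	if (!cpuset)
		err(EXIT_FAILURE, "CPU_ALLOC");
	pthread_attr_init(&attr);

	CPU_ZERO_S(size, cpuset);				/* the _S variants take the real size */
	CPU_SET_S(0, size, cpuset);				/* pin to CPU 0 for the example */
	if (pthread_attr_setaffinity_np(&attr, size, cpuset)) {
		CPU_FREE(cpuset);
		err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
	}
	if (pthread_create(&tid, &attr, workerfn, NULL)) {
		CPU_FREE(cpuset);
		err(EXIT_FAILURE, "pthread_create");
	}
	pthread_join(tid, NULL);
	CPU_FREE(cpuset);
	pthread_attr_destroy(&attr);
	return 0;
}

Passing sizeof(cpu_set_t) with a stack-allocated set, as the old code did, silently truncates the mask on machines with more than 1024 CPUs, which is exactly what these bench fixes address.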
@@ -291,9 +291,11 @@ static void print_summary(void)
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
 	pthread_attr_t thread_attr, *attrp = NULL;
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i, j;
 	int ret = 0, events = EPOLLIN;
+	int nrcpus;
+	size_t size;
 
 	if (oneshot)
 		events |= EPOLLONESHOT;
@@ -306,6 +308,11 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 	if (!noaffinity)
 		pthread_attr_init(&thread_attr);
 
+	nrcpus = perf_cpu_map__nr(cpu);
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	for (i = 0; i < nthreads; i++) {
 		struct worker *w = &worker[i];
 
@@ -341,22 +348,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 		}
 
 		if (!noaffinity) {
-			CPU_ZERO(&cpuset);
-			CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+			CPU_ZERO_S(size, cpuset);
+			CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+					size, cpuset);
 
-			ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-			if (ret)
+			ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+			if (ret) {
+				CPU_FREE(cpuset);
 				err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+			}
 
 			attrp = &thread_attr;
 		}
 
 		ret = pthread_create(&w->thread, attrp, workerfn,
 				     (void *)(struct worker *) w);
-		if (ret)
+		if (ret) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
 
+	CPU_FREE(cpuset);
 	if (!noaffinity)
 		pthread_attr_destroy(&thread_attr);
......
@@ -122,12 +122,14 @@ static void print_summary(void)
 int bench_futex_hash(int argc, const char **argv)
 {
 	int ret = 0;
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	struct sigaction act;
 	unsigned int i;
 	pthread_attr_t thread_attr;
 	struct worker *worker = NULL;
 	struct perf_cpu_map *cpu;
+	int nrcpus;
+	size_t size;
 
 	argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
 	if (argc) {
@@ -170,25 +172,35 @@ int bench_futex_hash(int argc, const char **argv)
 	threads_starting = params.nthreads;
 	pthread_attr_init(&thread_attr);
 	gettimeofday(&bench__start, NULL);
+
+	nrcpus = perf_cpu_map__nr(cpu);
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	for (i = 0; i < params.nthreads; i++) {
 		worker[i].tid = i;
 		worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex));
 		if (!worker[i].futex)
 			goto errmem;
 
-		CPU_ZERO(&cpuset);
-		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+		CPU_ZERO_S(size, cpuset);
+		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-		ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-		if (ret)
+		ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+		if (ret) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+		}
 
 		ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
 				     (void *)(struct worker *) &worker[i]);
-		if (ret)
+		if (ret) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
+	CPU_FREE(cpuset);
 	pthread_attr_destroy(&thread_attr);
 
 	pthread_mutex_lock(&thread_lock);
......
@@ -120,11 +120,17 @@ static void *workerfn(void *arg)
 static void create_threads(struct worker *w, pthread_attr_t thread_attr,
 			    struct perf_cpu_map *cpu)
 {
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i;
+	int nrcpus = perf_cpu_map__nr(cpu);
+	size_t size;
 
 	threads_starting = params.nthreads;
 
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	for (i = 0; i < params.nthreads; i++) {
 		worker[i].tid = i;
 
@@ -135,15 +141,20 @@ static void create_threads(struct worker *w, pthread_attr_t thread_attr,
 		} else
 			worker[i].futex = &global_futex;
 
-		CPU_ZERO(&cpuset);
-		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+		CPU_ZERO_S(size, cpuset);
+		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+		}
 
-		if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
+		if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
+	CPU_FREE(cpuset);
 }
 
 int bench_futex_lock_pi(int argc, const char **argv)
......
@@ -123,22 +123,33 @@ static void *workerfn(void *arg __maybe_unused)
 static void block_threads(pthread_t *w,
 			  pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
 {
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i;
+	int nrcpus = perf_cpu_map__nr(cpu);
+	size_t size;
 
 	threads_starting = params.nthreads;
 
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	/* create and block all threads */
 	for (i = 0; i < params.nthreads; i++) {
-		CPU_ZERO(&cpuset);
-		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+		CPU_ZERO_S(size, cpuset);
+		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+		}
 
-		if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+		if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
+	CPU_FREE(cpuset);
 }
 
 static void toggle_done(int sig __maybe_unused,
......
@@ -144,22 +144,33 @@ static void *blocked_workerfn(void *arg __maybe_unused)
 static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
 			   struct perf_cpu_map *cpu)
 {
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i;
+	int nrcpus = perf_cpu_map__nr(cpu);
+	size_t size;
 
 	threads_starting = params.nthreads;
 
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	/* create and block all threads */
 	for (i = 0; i < params.nthreads; i++) {
-		CPU_ZERO(&cpuset);
-		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+		CPU_ZERO_S(size, cpuset);
+		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+		}
 
-		if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
+		if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
+	CPU_FREE(cpuset);
 }
 
 static void print_run(struct thread_data *waking_worker, unsigned int run_num)
......
@@ -97,22 +97,32 @@ static void print_summary(void)
 static void block_threads(pthread_t *w,
 			  pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
 {
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i;
+	size_t size;
+	int nrcpus = perf_cpu_map__nr(cpu);
 
 	threads_starting = params.nthreads;
 
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	/* create and block all threads */
 	for (i = 0; i < params.nthreads; i++) {
-		CPU_ZERO(&cpuset);
-		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+		CPU_ZERO_S(size, cpuset);
+		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+		}
 
-		if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+		if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
+	CPU_FREE(cpuset);
 }
 
 static void toggle_done(int sig __maybe_unused,
......
@@ -55,6 +55,7 @@ struct cmd_struct {
 };
 
 static struct cmd_struct commands[] = {
+	{ "archive",	NULL,	0 },
 	{ "buildid-cache", cmd_buildid_cache, 0 },
 	{ "buildid-list", cmd_buildid_list, 0 },
 	{ "config",	cmd_config,	0 },
@@ -62,6 +63,7 @@ static struct cmd_struct commands[] = {
 	{ "diff",	cmd_diff,	0 },
 	{ "evlist",	cmd_evlist,	0 },
 	{ "help",	cmd_help,	0 },
+	{ "iostat",	NULL,	0 },
 	{ "kallsyms",	cmd_kallsyms,	0 },
 	{ "list",	cmd_list,	0 },
 	{ "record",	cmd_record,	0 },
@@ -360,6 +362,8 @@ static void handle_internal_command(int argc, const char **argv)
 	for (i = 0; i < ARRAY_SIZE(commands); i++) {
 		struct cmd_struct *p = commands+i;
 
+		if (p->fn == NULL)
+			continue;
 		if (strcmp(p->cmd, cmd))
 			continue;
 		exit(run_builtin(p, argc, argv));
@@ -434,7 +438,7 @@ void pthread__unblock_sigwinch(void)
 static int libperf_print(enum libperf_print_level level,
 			 const char *fmt, va_list ap)
 {
-	return eprintf(level, verbose, fmt, ap);
+	return veprintf(level, verbose, fmt, ap);
 }
 
 int main(int argc, const char **argv)
......
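
Editor's note: the libperf_print fix above is the classic va_list pitfall: a callback that already holds a va_list must forward it to a v-variant (veprintf here), never to a function that expects "..." arguments. A generic sketch of the pattern using only standard C; print_cb and my_log are illustrative names, not perf or libperf APIs:

#include <stdarg.h>
#include <stdio.h>

/* Callback that receives an already-started va_list: forward it to vfprintf(). */
static int print_cb(int level, const char *fmt, va_list ap)
{
	if (level > 1)
		return 0;			/* filter by verbosity, like libperf_print() */
	return vfprintf(stderr, fmt, ap);
}

/* Variadic front end: starts the va_list and hands it to the callback. */
static int my_log(int level, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = print_cb(level, fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	return my_log(0, "hello %s #%d\n", "world", 42) < 0;
}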
@@ -122,7 +122,7 @@ NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thr
 	}
 
 	err = unwind__get_entries(unwind_entry, &cnt, thread,
-				  &sample, MAX_STACK);
+				  &sample, MAX_STACK, false);
 	if (err)
 		pr_debug("unwind failed\n");
 	else if (cnt != MAX_STACK) {
......
@@ -47,6 +47,17 @@
 	}					\
 }
 
+static int test__tsc_is_supported(struct test_suite *test __maybe_unused,
+				  int subtest __maybe_unused)
+{
+	if (!TSC_IS_SUPPORTED) {
+		pr_debug("Test not supported on this architecture\n");
+		return TEST_SKIP;
+	}
+
+	return TEST_OK;
+}
+
 /**
  * test__perf_time_to_tsc - test converting perf time to TSC.
  *
@@ -70,7 +81,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 	struct perf_cpu_map *cpus = NULL;
 	struct evlist *evlist = NULL;
 	struct evsel *evsel = NULL;
-	int err = -1, ret, i;
+	int err = TEST_FAIL, ret, i;
 	const char *comm1, *comm2;
 	struct perf_tsc_conversion tc;
 	struct perf_event_mmap_page *pc;
@@ -79,10 +90,6 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 	u64 test_time, comm1_time = 0, comm2_time = 0;
 	struct mmap *md;
 
-	if (!TSC_IS_SUPPORTED) {
-		pr_debug("Test not supported on this architecture");
-		return TEST_SKIP;
-	}
 
 	threads = thread_map__new(-1, getpid(), UINT_MAX);
 	CHECK_NOT_NULL__(threads);
@@ -124,8 +131,8 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 	ret = perf_read_tsc_conversion(pc, &tc);
 	if (ret) {
 		if (ret == -EOPNOTSUPP) {
-			fprintf(stderr, " (not supported)");
-			return 0;
+			pr_debug("perf_read_tsc_conversion is not supported in current kernel\n");
+			err = TEST_SKIP;
 		}
 		goto out_err;
 	}
@@ -191,7 +198,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 	    test_tsc >= comm2_tsc)
 		goto out_err;
 
-	err = 0;
+	err = TEST_OK;
 
 out_err:
 	evlist__delete(evlist);
@@ -200,4 +207,15 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 	return err;
 }
 
-DEFINE_SUITE("Convert perf time to TSC", perf_time_to_tsc);
+static struct test_case time_to_tsc_tests[] = {
+	TEST_CASE_REASON("TSC support", tsc_is_supported,
+			 "This architecture does not support"),
+	TEST_CASE_REASON("Perf time to TSC", perf_time_to_tsc,
+			 "perf_read_tsc_conversion is not supported"),
+	{ .name = NULL, }
+};
+
+struct test_suite suite__perf_time_to_tsc = {
+	.desc = "Convert perf time to TSC",
+	.test_cases = time_to_tsc_tests,
+};
@@ -2047,6 +2047,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
 	objdump_process.argv = objdump_argv;
 	objdump_process.out = -1;
 	objdump_process.err = -1;
+	objdump_process.no_stderr = 1;
 	if (start_command(&objdump_process)) {
 		pr_err("Failure starting to run %s\n", command);
 		err = -1;
......
@@ -53,7 +53,7 @@ u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thr
 		sample->user_regs.cache_regs[PERF_REG_ARM64_SP] = 0;
 	}
 
-	ret = unwind__get_entries(add_entry, &entries, thread, sample, 2);
+	ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);
 	sample->user_regs = old_regs;
 
 	if (ret || entries.length != 2)
......
@@ -2987,7 +2987,7 @@ static int thread__resolve_callchain_unwind(struct thread *thread,
 		return 0;
 
 	return unwind__get_entries(unwind_entry, cursor,
-				   thread, sample, max_stack);
+				   thread, sample, max_stack, false);
 }
 
 int thread__resolve_callchain(struct thread *thread,
......
@@ -2095,6 +2095,7 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
 	       bool needs_swap, union perf_event *error)
 {
 	union perf_event *event;
+	u16 event_size;
 
 	/*
 	 * Ensure we have enough space remaining to read
@@ -2107,15 +2108,23 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
 	if (needs_swap)
 		perf_event_header__bswap(&event->header);
 
-	if (head + event->header.size <= mmap_size)
+	event_size = event->header.size;
+	if (head + event_size <= mmap_size)
 		return event;
 
 	/* We're not fetching the event so swap back again */
 	if (needs_swap)
 		perf_event_header__bswap(&event->header);
 
-	pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
-		 " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size);
+	/* Check if the event fits into the next mmapped buf. */
+	if (event_size <= mmap_size - head % page_size) {
+		/* Remap buf and fetch again. */
+		return NULL;
+	}
+
+	/* Invalid input. Event size should never exceed mmap_size. */
+	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
+		 " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
 
 	return error;
 }
......
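
Editor's note: the prefetch_event() change above distinguishes three cases from head, mmap_size and the event size: the event is fully inside the current window (use it), it would fit once the buffer is remapped at the next page boundary (return NULL so the caller remaps), or it can never fit (corrupt or fuzzed input). A small self-contained sketch of just that arithmetic, with hypothetical names rather than the perf session code:

#include <stdint.h>
#include <stdio.h>

enum fetch_result { FETCH_OK, FETCH_REMAP, FETCH_ERROR };

/* Decide what to do with an event of event_size bytes starting at offset head
 * inside a mapped window of mmap_size bytes; page_size is the remap alignment. */
static enum fetch_result classify(uint64_t head, uint16_t event_size,
				  size_t mmap_size, size_t page_size)
{
	if (head + event_size <= mmap_size)
		return FETCH_OK;			/* fully inside the current window */
	if (event_size <= mmap_size - head % page_size)
		return FETCH_REMAP;			/* fits after remapping at the page boundary */
	return FETCH_ERROR;				/* larger than any window: invalid input */
}

int main(void)
{
	/* 64KiB window, 4KiB pages: an 8KiB event at offset 60KiB needs a remap (prints 1). */
	printf("%d\n", classify(60 * 1024, 8 * 1024, 64 * 1024, 4 * 1024));
	return 0;
}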
-from os import getenv
+from os import getenv, path
 from subprocess import Popen, PIPE
 from re import sub
 
 cc = getenv("CC")
 cc_is_clang = b"clang version" in Popen([cc.split()[0], "-v"], stderr=PIPE).stderr.readline()
+src_feature_tests = getenv('srctree') + '/tools/build/feature'
 
 def clang_has_option(option):
-    return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
+    cc_output = Popen([cc, option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
+    return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ]
 
 if cc_is_clang:
     from distutils.sysconfig import get_config_vars
@@ -23,6 +25,8 @@ if cc_is_clang:
             vars[var] = sub("-fstack-protector-strong", "", vars[var])
         if not clang_has_option("-fno-semantic-interposition"):
             vars[var] = sub("-fno-semantic-interposition", "", vars[var])
+        if not clang_has_option("-ffat-lto-objects"):
+            vars[var] = sub("-ffat-lto-objects", "", vars[var])
 
 from distutils.core import setup, Extension
......
@@ -200,6 +200,7 @@ frame_callback(Dwfl_Frame *state, void *arg)
 	bool isactivation;
 
 	if (!dwfl_frame_pc(state, &pc, NULL)) {
+		if (!ui->best_effort)
 			pr_err("%s", dwfl_errmsg(-1));
 		return DWARF_CB_ABORT;
 	}
@@ -208,6 +209,7 @@ frame_callback(Dwfl_Frame *state, void *arg)
 	report_module(pc, ui);
 
 	if (!dwfl_frame_pc(state, &pc, &isactivation)) {
+		if (!ui->best_effort)
 			pr_err("%s", dwfl_errmsg(-1));
 		return DWARF_CB_ABORT;
 	}
@@ -222,7 +224,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 			struct thread *thread,
 			struct perf_sample *data,
-			int max_stack)
+			int max_stack,
+			bool best_effort)
 {
 	struct unwind_info *ui, ui_buf = {
 		.sample		= data,
@@ -231,6 +234,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 		.cb		= cb,
 		.arg		= arg,
 		.max_stack	= max_stack,
+		.best_effort	= best_effort
 	};
 	Dwarf_Word ip;
 	int err = -EINVAL, i;
......
@@ -20,6 +20,7 @@ struct unwind_info {
 	void			*arg;
 	int			max_stack;
 	int			idx;
+	bool			best_effort;
 	struct unwind_entry	entries[];
 };
......
@@ -96,6 +96,7 @@ struct unwind_info {
 	struct perf_sample	*sample;
 	struct machine		*machine;
 	struct thread		*thread;
+	bool			 best_effort;
 };
 
 #define dw_read(ptr, type, end) ({	\
@@ -553,6 +554,7 @@ static int access_reg(unw_addr_space_t __maybe_unused as,
 
 	ret = perf_reg_value(&val, &ui->sample->user_regs, id);
 	if (ret) {
+		if (!ui->best_effort)
 			pr_err("unwind: can't read reg %d\n", regnum);
 		return ret;
 	}
@@ -666,7 +668,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 		return -1;
 
 	ret = unw_init_remote(&c, addr_space, ui);
-	if (ret)
+	if (ret && !ui->best_effort)
 		display_error(ret);
 
 	while (!ret && (unw_step(&c) > 0) && i < max_stack) {
@@ -704,12 +706,14 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 
 static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 			struct thread *thread,
-			struct perf_sample *data, int max_stack)
+			struct perf_sample *data, int max_stack,
+			bool best_effort)
 {
 	struct unwind_info ui = {
 		.sample       = data,
 		.thread       = thread,
 		.machine      = thread->maps->machine,
+		.best_effort  = best_effort
 	};
 
 	if (!data->user_regs.regs)
......
@@ -80,9 +80,11 @@ void unwind__finish_access(struct maps *maps)
 
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 			struct thread *thread,
-			struct perf_sample *data, int max_stack)
+			struct perf_sample *data, int max_stack,
+			bool best_effort)
 {
 	if (thread->maps->unwind_libunwind_ops)
-		return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
+		return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data,
+									max_stack, best_effort);
 	return 0;
 }
@@ -23,13 +23,19 @@ struct unwind_libunwind_ops {
 	void (*finish_access)(struct maps *maps);
 	int (*get_entries)(unwind_entry_cb_t cb, void *arg,
 			   struct thread *thread,
-			   struct perf_sample *data, int max_stack);
+			   struct perf_sample *data, int max_stack, bool best_effort);
 };
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
+/*
+ * When best_effort is set, don't report errors and fail silently. This could
+ * be expanded in the future to be more permissive about things other than
+ * error messages.
+ */
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 			struct thread *thread,
-			struct perf_sample *data, int max_stack);
+			struct perf_sample *data, int max_stack,
+			bool best_effort);
 
 /* libunwind specific */
 #ifdef HAVE_LIBUNWIND_SUPPORT
 #ifndef LIBUNWIND__ARCH_REG_ID
@@ -65,7 +71,8 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
 		    void *arg __maybe_unused,
 		    struct thread *thread __maybe_unused,
 		    struct perf_sample *data __maybe_unused,
-		    int max_stack __maybe_unused)
+		    int max_stack __maybe_unused,
+		    bool best_effort __maybe_unused)
 {
 	return 0;
 }
......