Commit f9e2bb42 authored by Linus Torvalds

Merge tag 'perf-tools-fixes-for-v5.12-2020-03-28' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull perf tooling fixes from Arnaldo Carvalho de Melo:

 - Avoid write of uninitialized memory when generating PERF_RECORD_MMAP*
   records.

 - Fix 'perf top' BPF support related crash with perf_event_paranoid=3 +
   kptr_restrict.

 - Validate raw event with sysfs exported format bits.

 - Fix waitpid handling on SIGCHLD delivery in 'perf daemon'.

 - Change the daemon test to use bash: on Debian the default shell is
   dash, and the test fails there because it uses bashisms.

 - Fix memory leak in vDSO found using ASAN.

 - Remove the now-useless failing sub test "BPF relocation checker";
   it is obsolete now that BPF supports static variables.

 - Fix auxtrace queue conflict.

 - Sync linux/kvm.h with the kernel sources.

* tag 'perf-tools-fixes-for-v5.12-2020-03-28' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf test: Change to use bash for daemon test
  perf record: Fix memory leak in vDSO found using ASAN
  perf test: Remove now useless failing sub test "BPF relocation checker"
  perf daemon: Return from kill functions
  perf daemon: Force waipid for all session on SIGCHLD delivery
  perf top: Fix BPF support related crash with perf_event_paranoid=3 + kptr_restrict
  perf pmu: Validate raw event with sysfs exported format bits
  perf synthetic events: Avoid write of uninitialized memory when generating PERF_RECORD_MMAP* records
  tools headers UAPI: Sync linux/kvm.h with the kernel sources
  perf synthetic-events: Fix uninitialized 'kernel_thread' variable
  perf auxtrace: Fix auxtrace queue conflict
parents 3fef15f8 1dc481c0
@@ -1154,6 +1154,7 @@ struct kvm_x86_mce {
 #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR	(1 << 0)
 #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL	(1 << 1)
 #define KVM_XEN_HVM_CONFIG_SHARED_INFO		(1 << 2)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE		(1 << 3)
 
 struct kvm_xen_hvm_config {
 	__u32 flags;
@@ -1621,12 +1622,24 @@ struct kvm_xen_vcpu_attr {
 	union {
 		__u64 gpa;
 		__u64 pad[8];
+		struct {
+			__u64 state;
+			__u64 state_entry_time;
+			__u64 time_running;
+			__u64 time_runnable;
+			__u64 time_blocked;
+			__u64 time_offline;
+		} runstate;
 	} u;
 };
 
 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO	0x0
 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO	0x1
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR	0x2
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT	0x3
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA	0x4
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST	0x5
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
...
@@ -402,35 +402,42 @@ static pid_t handle_signalfd(struct daemon *daemon)
 	int status;
 	pid_t pid;
 
+	/*
+	 * Take signal fd data as pure signal notification and check all
+	 * the sessions state. The reason is that multiple signals can get
+	 * coalesced in kernel and we can receive only single signal even
+	 * if multiple SIGCHLD were generated.
+	 */
 	err = read(daemon->signal_fd, &si, sizeof(struct signalfd_siginfo));
-	if (err != sizeof(struct signalfd_siginfo))
+	if (err != sizeof(struct signalfd_siginfo)) {
+		pr_err("failed to read signal fd\n");
 		return -1;
+	}
 
 	list_for_each_entry(session, &daemon->sessions, list) {
-		if (session->pid != (int) si.ssi_pid)
+		if (session->pid == -1)
 			continue;
 
-		pid = waitpid(session->pid, &status, 0);
-		if (pid == session->pid) {
-			if (WIFEXITED(status)) {
-				pr_info("session '%s' exited, status=%d\n",
-					session->name, WEXITSTATUS(status));
-			} else if (WIFSIGNALED(status)) {
-				pr_info("session '%s' killed (signal %d)\n",
-					session->name, WTERMSIG(status));
-			} else if (WIFSTOPPED(status)) {
-				pr_info("session '%s' stopped (signal %d)\n",
-					session->name, WSTOPSIG(status));
-			} else {
-				pr_info("session '%s' Unexpected status (0x%x)\n",
-					session->name, status);
-			}
+		pid = waitpid(session->pid, &status, WNOHANG);
+		if (pid <= 0)
+			continue;
+
+		if (WIFEXITED(status)) {
+			pr_info("session '%s' exited, status=%d\n",
+				session->name, WEXITSTATUS(status));
+		} else if (WIFSIGNALED(status)) {
+			pr_info("session '%s' killed (signal %d)\n",
+				session->name, WTERMSIG(status));
+		} else if (WIFSTOPPED(status)) {
+			pr_info("session '%s' stopped (signal %d)\n",
+				session->name, WSTOPSIG(status));
+		} else {
+			pr_info("session '%s' Unexpected status (0x%x)\n",
+				session->name, status);
 		}
 
 		session->state = KILL;
 		session->pid = -1;
-		return pid;
 	}
 
 	return 0;
@@ -443,7 +450,6 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
 		.fd	= daemon->signal_fd,
 		.events	= POLLIN,
 	};
-	pid_t wpid = 0, pid = session->pid;
 	time_t start;
 
 	start = time(NULL);
@@ -452,7 +458,7 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
 		int err = poll(&pollfd, 1, 1000);
 
 		if (err > 0) {
-			wpid = handle_signalfd(daemon);
+			handle_signalfd(daemon);
 		} else if (err < 0) {
 			perror("failed: poll\n");
 			return -1;
@@ -460,7 +466,7 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
 		if (start + secs < time(NULL))
 			return -1;
-	} while (wpid != pid);
+	} while (session->pid != -1);
 
 	return 0;
 }
@@ -902,7 +908,9 @@ static void daemon_session__kill(struct daemon_session *session,
 		daemon_session__signal(session, SIGKILL);
 		break;
 	default:
-		break;
+		pr_err("failed to wait for session %s\n",
+		       session->name);
+		return;
 	}
 
 	how++;
@@ -955,7 +963,8 @@ static void daemon__kill(struct daemon *daemon)
 		daemon__signal(daemon, SIGKILL);
 		break;
 	default:
-		break;
+		pr_err("failed to wait for sessions\n");
+		return;
 	}
 
 	how++;
@@ -1344,7 +1353,7 @@ static int __cmd_start(struct daemon *daemon, struct option parent_options[],
 		close(sock_fd);
 	if (conf_fd != -1)
 		close(conf_fd);
-	if (conf_fd != -1)
+	if (signal_fd != -1)
 		close(signal_fd);
 
 	pr_info("daemon exited\n");
...
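
The handle_signalfd() change above leans on a general property of POSIX signals: pending SIGCHLD signals are not queued per child, so a single notification can stand for several exited sessions, which is why every session is now polled with WNOHANG instead of a blocking waitpid() on one pid. A minimal standalone sketch of that reaping pattern (plain C, not perf code; the session bookkeeping is left out):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /*
     * Minimal sketch (not perf code): one SIGCHLD notification may cover
     * several exited children, so reap in a loop with WNOHANG until
     * waitpid() reports no more state changes.
     */
    static void reap_children(void)
    {
        int status;
        pid_t pid;

        while ((pid = waitpid(-1, &status, WNOHANG)) > 0) {
            if (WIFEXITED(status))
                printf("child %d exited, status=%d\n",
                       (int)pid, WEXITSTATUS(status));
            else if (WIFSIGNALED(status))
                printf("child %d killed by signal %d\n",
                       (int)pid, WTERMSIG(status));
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            if (fork() == 0)
                _exit(i);       /* child exits immediately */
        }
        sleep(1);               /* let all three exit before one reap pass */
        reap_children();
        return 0;
    }

Because waitpid() is called with WNOHANG, the loop simply stops when no more children have changed state, so a coalesced SIGCHLD never leaves a zombie behind.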
@@ -86,7 +86,7 @@ static struct {
 		.msg_load_fail	  = "check your vmlinux setting?",
 		.target_func	  = &epoll_pwait_loop,
 		.expect_result	  = (NR_ITERS + 1) / 2,
-		.pin = true,
+		.pin		  = true,
 	},
 #ifdef HAVE_BPF_PROLOGUE
 	{
@@ -99,13 +99,6 @@ static struct {
 		.expect_result	  = (NR_ITERS + 1) / 4,
 	},
 #endif
-	{
-		.prog_id	  = LLVM_TESTCASE_BPF_RELOCATION,
-		.desc		  = "BPF relocation checker",
-		.name		  = "[bpf_relocation_test]",
-		.msg_compile_fail = "fix 'perf test LLVM' first",
-		.msg_load_fail	  = "libbpf error when dealing with relocation",
-	},
 };
 
 static int do_test(struct bpf_object *obj, int (*func)(void),
...
-#!/bin/sh
+#!/bin/bash
 # daemon operations
 # SPDX-License-Identifier: GPL-2.0
...
@@ -298,10 +298,6 @@ static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
 		queue->set = true;
 		queue->tid = buffer->tid;
 		queue->cpu = buffer->cpu;
-	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
-		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
-		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
-		return -EINVAL;
 	}
 
 	buffer->buffer_nr = queues->next_buffer_nr++;
...
@@ -196,25 +196,32 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
 	}
 
 	if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+		free(info_linear);
 		pr_debug("%s: the kernel is too old, aborting\n", __func__);
 		return -2;
 	}
 
 	info = &info_linear->info;
+	if (!info->jited_ksyms) {
+		free(info_linear);
+		return -1;
+	}
 
 	/* number of ksyms, func_lengths, and tags should match */
 	sub_prog_cnt = info->nr_jited_ksyms;
 	if (sub_prog_cnt != info->nr_prog_tags ||
-	    sub_prog_cnt != info->nr_jited_func_lens)
+	    sub_prog_cnt != info->nr_jited_func_lens) {
+		free(info_linear);
 		return -1;
+	}
 
 	/* check BTF func info support */
 	if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
 		/* btf func info number should be same as sub_prog_cnt */
 		if (sub_prog_cnt != info->nr_func_info) {
 			pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
-			err = -1;
-			goto out;
+			free(info_linear);
+			return -1;
 		}
 
 		if (btf__get_from_id(info->btf_id, &btf)) {
 			pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
...
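
The new !info->jited_ksyms check covers the case where perf_event_paranoid=3 plus kptr_restrict makes the kernel zero out the jited ksym addresses, so the synthesizer has to bail out instead of dereferencing them, and the added free(info_linear) calls plug the leak on each of those early returns. A generic illustration of that "free on every early exit" style (plain C, not perf code; the names and checks are made up for the example):

    #include <stdlib.h>
    #include <string.h>

    /* Generic illustration, not perf code: every early return taken after
     * the allocation must release the buffer, or the function leaks. */
    static int process(const unsigned char *data, size_t len)
    {
        unsigned char *copy = malloc(len);

        if (!copy)
            return -1;

        memcpy(copy, data, len);

        if (len < 8) {              /* "kernel too old"-style bail-out */
            free(copy);
            return -2;
        }
        if (copy[0] == 0) {         /* "field zeroed by the kernel"-style bail-out */
            free(copy);
            return -1;
        }

        /* ... real work would go here ... */
        free(copy);
        return 0;
    }

    int main(void)
    {
        const unsigned char buf[16] = { 1, 2, 3 };

        return process(buf, sizeof(buf)) ? 1 : 0;
    }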
@@ -356,6 +356,9 @@ __add_event(struct list_head *list, int *idx,
 	struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
 			       cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
 
+	if (pmu && attr->type == PERF_TYPE_RAW)
+		perf_pmu__warn_invalid_config(pmu, attr->config, name);
+
 	if (init_attr)
 		event_attr_init(attr);
...
@@ -1812,3 +1812,36 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
 
 	return nr_caps;
 }
+
+void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+				   char *name)
+{
+	struct perf_pmu_format *format;
+	__u64 masks = 0, bits;
+	char buf[100];
+	unsigned int i;
+
+	list_for_each_entry(format, &pmu->format, list) {
+		if (format->value != PERF_PMU_FORMAT_VALUE_CONFIG)
+			continue;
+
+		for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS)
+			masks |= 1ULL << i;
+	}
+
+	/*
+	 * Kernel doesn't export any valid format bits.
+	 */
+	if (masks == 0)
+		return;
+
+	bits = config & ~masks;
+	if (bits == 0)
+		return;
+
+	bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf));
+
+	pr_warning("WARNING: event '%s' not valid (bits %s of config "
+		   "'%llx' not supported by kernel)!\n",
+		   name ?: "N/A", buf, config);
+}
@@ -123,4 +123,7 @@ int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
 
 int perf_pmu__caps_parse(struct perf_pmu *pmu);
 
+void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+				   char *name);
+
 #endif /* __PMU_H */
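
For context on the new helper: the mask is accumulated from the PMU's sysfs format specs (the files under the PMU's format/ directory), so a PMU that only documents, say, bits 0-7 and 16-23 of 'config' will trigger the warning for any raw event that sets other bits. A standalone sketch of the same check, with a hard-coded mask standing in for the bits parsed from sysfs (illustrative names, not perf's API):

    #include <inttypes.h>
    #include <stdio.h>

    /* Illustrative only: bits 0-7 and 16-23 stand in for the "config"
     * format fields a PMU might export under sysfs. */
    #define SUPPORTED_CONFIG_MASK ((uint64_t)0x00ff00ff)

    static void warn_invalid_config(const char *event, uint64_t config)
    {
        uint64_t bad = config & ~SUPPORTED_CONFIG_MASK;

        if (bad)
            printf("WARNING: event '%s': config bits 0x%" PRIx64
                   " not covered by the PMU format specs\n", event, bad);
    }

    int main(void)
    {
        warn_invalid_config("cpu/r00ab00cd/", 0x00ab00cd); /* silent: all bits supported */
        warn_invalid_config("cpu/r0000ab00/", 0x0000ab00); /* warns: bad bits 0xab00 */
        return 0;
    }

In the real helper the mask comes from for_each_set_bit() over every PERF_PMU_FORMAT_VALUE_CONFIG format entry, and the warning is emitted at event-parse time from __add_event().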
@@ -424,7 +424,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 
 	while (!io.eof) {
 		static const char anonstr[] = "//anon";
-		size_t size;
+		size_t size, aligned_size;
 
 		/* ensure null termination since stack will be reused. */
 		event->mmap2.filename[0] = '\0';
@@ -484,11 +484,12 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 		}
 
 		size = strlen(event->mmap2.filename) + 1;
-		size = PERF_ALIGN(size, sizeof(u64));
+		aligned_size = PERF_ALIGN(size, sizeof(u64));
 		event->mmap2.len -= event->mmap.start;
 		event->mmap2.header.size = (sizeof(event->mmap2) -
-					(sizeof(event->mmap2.filename) - size));
-		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+					(sizeof(event->mmap2.filename) - aligned_size));
+		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
+		       (aligned_size - size));
 		event->mmap2.header.size += machine->id_hdr_size;
 		event->mmap2.pid = tgid;
 		event->mmap2.tid = pid;
@@ -758,7 +759,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
 	for (i = 0; i < n; i++) {
 		char *end;
 		pid_t _pid;
-		bool kernel_thread;
+		bool kernel_thread = false;
 
 		_pid = strtol(dirent[i]->d_name, &end, 10);
 		if (*end)
...
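
The size/aligned_size split matters because PERF_ALIGN() rounds the filename length up to an 8-byte boundary while the record lives in a reused on-stack buffer, so the padding bytes (and the id_hdr_size area) must be zeroed explicitly or stale stack bytes end up inside the emitted PERF_RECORD_MMAP2 record. A small sketch of the arithmetic (generic C with an ALIGN_UP macro standing in for PERF_ALIGN; the path is made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Same idea as perf's PERF_ALIGN(): round x up to a multiple of a
     * (a must be a power of two). */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        char record[64];
        const char *filename = "/usr/lib/libfoo.so";          /* hypothetical path */
        size_t size = strlen(filename) + 1;                    /* 19 bytes incl. NUL */
        size_t aligned_size = ALIGN_UP(size, sizeof(uint64_t)); /* rounded up to 24 */

        memcpy(record, filename, size);
        /* Zero the alignment padding so no stale stack bytes are emitted. */
        memset(record + size, 0, aligned_size - size);

        printf("size=%zu aligned=%zu padding=%zu\n",
               size, aligned_size, aligned_size - size);
        return 0;
    }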
@@ -133,6 +133,8 @@ static struct dso *__machine__addnew_vdso(struct machine *machine, const char *s
 	if (dso != NULL) {
 		__dsos__add(&machine->dsos, dso);
 		dso__set_long_name(dso, long_name, false);
+		/* Put dso here because __dsos_add already got it */
+		dso__put(dso);
 	}
 
 	return dso;
...
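
The dso__put() fixes the leak by following the usual get/put refcount discipline: __dsos__add() takes its own reference to the dso, so the reference held since creation has to be dropped once the dso is parked in the list. A generic sketch of that discipline (illustrative types, not perf's struct dso):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative refcounted object, not perf's struct dso. */
    struct obj {
        int refcnt;
    };

    static struct obj *obj__new(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
            o->refcnt = 1;          /* creator holds the first reference */
        return o;
    }

    static void obj__get(struct obj *o) { o->refcnt++; }

    static void obj__put(struct obj *o)
    {
        if (o && --o->refcnt == 0) {
            printf("freed\n");
            free(o);
        }
    }

    /* A container that keeps the object takes its own reference. */
    static void container_add(struct obj *o)    { obj__get(o); }
    static void container_remove(struct obj *o) { obj__put(o); }

    int main(void)
    {
        struct obj *o = obj__new();

        container_add(o);       /* container: +1 (refcnt == 2) */
        obj__put(o);            /* drop the creator's reference, as vdso.c now does */
        container_remove(o);    /* container drops its reference -> freed */
        return 0;
    }

Dropping the creator's reference right after handing the object to its long-lived owner is what lets the final put reach zero and free the memory, which is exactly the leak ASAN was flagging here.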