Commit 1fcb8768 authored by Adrian Hunter, committed by Arnaldo Carvalho de Melo

perf machine: Fix the value used for unknown pids

The value used for unknown pids cannot be zero because pid zero is used by
the "idle" task.

Use -1 instead.  Also handle the unknown pid case when creating map
groups.

Note that threads with an unknown pid should not occur, because fork (or
synthesized) events precede the thread's existence.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1405332185-4050-2-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 57608cfd
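
Before the diff itself, here is a minimal, self-contained sketch of the convention this change establishes. This is not perf code: UNKNOWN_PID and the ex_* names are invented for the example. Callers that only know a thread id pass -1 for the pid, a cached pid is overwritten only when the caller actually supplied one, and a thread whose pid is still unknown gets map groups of its own rather than looking up a group leader that may not exist:

#include <stdio.h>

#define UNKNOWN_PID (-1)        /* was 0, which collides with the idle task */

struct ex_thread {
        int pid_;               /* process id, or UNKNOWN_PID */
        int tid;                /* thread id */
        int has_own_mg;         /* stands in for thread->mg = map_groups__new() */
};

/* Overwrite the cached pid only when the caller actually knows it. */
static void ex_update_pid(struct ex_thread *th, int pid)
{
        if (pid != UNKNOWN_PID && pid != th->pid_)
                th->pid_ = pid;
}

/* Mirrors the thread__init_map_groups() decision: a group leader, or a
 * thread whose pid is unknown, gets fresh map groups of its own. */
static void ex_init_map_groups(struct ex_thread *th)
{
        th->has_own_mg = (th->pid_ == th->tid || th->pid_ == UNKNOWN_PID);
}

int main(void)
{
        struct ex_thread th = { .pid_ = UNKNOWN_PID, .tid = 1234, .has_own_mg = 0 };

        ex_init_map_groups(&th);         /* unknown pid -> own map groups */
        ex_update_pid(&th, UNKNOWN_PID); /* caller does not know the pid: no change */
        ex_update_pid(&th, 1230);        /* a later event supplies the real pid */

        printf("own map groups: %d, pid now: %d\n", th.has_own_mg, th.pid_);
        return 0;
}

In perf itself the corresponding checks appear below as "pid != -1" in __machine__findnew_thread() and "pid == thread->tid || pid == -1" in thread__init_map_groups().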
tools/perf/builtin-sched.c

@@ -935,8 +935,8 @@ static int latency_switch_event(struct perf_sched *sched,
 		return -1;
 	}
 
-	sched_out = machine__findnew_thread(machine, 0, prev_pid);
-	sched_in = machine__findnew_thread(machine, 0, next_pid);
+	sched_out = machine__findnew_thread(machine, -1, prev_pid);
+	sched_in = machine__findnew_thread(machine, -1, next_pid);
 
 	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
 	if (!out_events) {

@@ -979,7 +979,7 @@ static int latency_runtime_event(struct perf_sched *sched,
 {
 	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
 	const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
-	struct thread *thread = machine__findnew_thread(machine, 0, pid);
+	struct thread *thread = machine__findnew_thread(machine, -1, pid);
 	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
 	u64 timestamp = sample->time;
 	int cpu = sample->cpu;

@@ -1012,7 +1012,7 @@ static int latency_wakeup_event(struct perf_sched *sched,
 	struct thread *wakee;
 	u64 timestamp = sample->time;
 
-	wakee = machine__findnew_thread(machine, 0, pid);
+	wakee = machine__findnew_thread(machine, -1, pid);
 	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
 	if (!atoms) {
 		if (thread_atoms_insert(sched, wakee))

@@ -1072,7 +1072,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
 	if (sched->profile_cpu == -1)
 		return 0;
 
-	migrant = machine__findnew_thread(machine, 0, pid);
+	migrant = machine__findnew_thread(machine, -1, pid);
 	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
 	if (!atoms) {
 		if (thread_atoms_insert(sched, migrant))

@@ -1290,7 +1290,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
 		return -1;
 	}
 
-	sched_in = machine__findnew_thread(machine, 0, next_pid);
+	sched_in = machine__findnew_thread(machine, -1, next_pid);
 
 	sched->curr_thread[this_cpu] = sched_in;
tools/perf/util/machine.c

@@ -34,7 +34,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
 		return -ENOMEM;
 
 	if (pid != HOST_KERNEL_ID) {
-		struct thread *thread = machine__findnew_thread(machine, 0,
+		struct thread *thread = machine__findnew_thread(machine, -1,
 								pid);
 		char comm[64];

@@ -286,7 +286,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
 	 * the full rbtree:
 	 */
 	if (machine->last_match && machine->last_match->tid == tid) {
-		if (pid && pid != machine->last_match->pid_)
+		if (pid != -1 && pid != machine->last_match->pid_)
 			machine->last_match->pid_ = pid;
 		return machine->last_match;
 	}

@@ -297,7 +297,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
 		if (th->tid == tid) {
 			machine->last_match = th;
-			if (pid && pid != th->pid_)
+			if (pid != -1 && pid != th->pid_)
 				th->pid_ = pid;
 			return th;
 		}
tools/perf/util/session.c

@@ -1083,13 +1083,14 @@ void perf_event_header__bswap(struct perf_event_header *hdr)
 
 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
 {
-	return machine__findnew_thread(&session->machines.host, 0, pid);
+	return machine__findnew_thread(&session->machines.host, -1, pid);
 }
 
 static struct thread *perf_session__register_idle_thread(struct perf_session *session)
 {
-	struct thread *thread = perf_session__findnew(session, 0);
+	struct thread *thread;
 
+	thread = machine__findnew_thread(&session->machines.host, 0, 0);
 	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
 		pr_err("problem inserting idle task.\n");
 		thread = NULL;
tools/perf/util/thread.c

@@ -13,7 +13,7 @@ int thread__init_map_groups(struct thread *thread, struct machine *machine)
 	struct thread *leader;
 	pid_t pid = thread->pid_;
 
-	if (pid == thread->tid) {
+	if (pid == thread->tid || pid == -1) {
 		thread->mg = map_groups__new();
 	} else {
 		leader = machine__findnew_thread(machine, pid, pid);