Commit d6ace3df authored by Kan Liang, committed by Arnaldo Carvalho de Melo

perf mmap: Simplify perf_mmap__consume()

It isn't necessary to pass the 'overwrite' argument to
perf_mmap__consume().  Discard it.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Suggested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1520350567-80082-6-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent bdec8b2f
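
For context, a minimal sketch of the per-mmap read pass as it looks after this change, following the perf_mmap__read_init()/perf_mmap__read_event()/perf_mmap__read_done() sequence visible in the hunks below. The read_one_mmap() wrapper and handle_event() callback are illustrative names only, and the 'false' (non-overwrite) flags mirror the changed call sites; none of this is part of the commit itself.

/*
 * Sketch only: one ring-buffer read pass with the simplified API, assuming
 * the tools/perf internal declarations from util/mmap.h and util/event.h.
 */
static int read_one_mmap(struct perf_mmap *md)
{
        union perf_event *event;
        u64 start, end;

        if (perf_mmap__read_init(md, false, &start, &end) < 0)
                return 0;       /* nothing new in this ring buffer */

        while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
                handle_event(event);            /* hypothetical callback */
                perf_mmap__consume(md);         /* no 'overwrite' argument any more */
        }

        perf_mmap__read_done(md);
        return 0;
}
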
@@ -134,7 +134,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
 comm2_time = sample.time;
 }
 next_event:
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 }
 perf_mmap__read_done(md);
 }
@@ -760,7 +760,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
 while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
 err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
 if (err) {
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 pr_err("Failed to parse sample\n");
 return -1;
 }
@@ -770,7 +770,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
 * FIXME: Here we can't consume the event, as perf_session__queue_event will
 * point to it, and it'll get possibly overwritten by the kernel.
 */
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 if (err) {
 pr_err("Failed to enqueue sample: %d\n", err);
@@ -879,7 +879,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 } else
 ++session->evlist->stats.nr_unknown_events;
 next_event:
-perf_mmap__consume(md, opts->overwrite);
+perf_mmap__consume(md);
 }
 perf_mmap__read_done(md);
@@ -2522,7 +2522,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 trace__handle_event(trace, event, &sample);
 next_event:
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 if (interrupted)
 goto out_disable;
@@ -420,7 +420,7 @@ static int process_events(struct machine *machine, struct perf_evlist *evlist,
 while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
 ret = process_event(machine, evlist, event, state);
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 if (ret < 0)
 return ret;
 }
@@ -42,7 +42,7 @@ static int find_comm(struct perf_evlist *evlist, const char *comm)
 (pid_t)event->comm.tid == getpid() &&
 strcmp(event->comm.comm, comm) == 0)
 found += 1;
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 }
 perf_mmap__read_done(md);
 }
@@ -135,7 +135,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
 goto out_delete_evlist;
 }
 nr_events[evsel->idx]++;
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 }
 perf_mmap__read_done(md);
@@ -101,7 +101,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 ++nr_events;
 if (type != PERF_RECORD_SAMPLE) {
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 continue;
 }
@@ -272,7 +272,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 ++errs;
 }
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 }
 perf_mmap__read_done(md);
 }
@@ -114,7 +114,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 total_periods += sample.period;
 nr_samples++;
 next_event:
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 }
 perf_mmap__read_done(md);
@@ -270,7 +270,7 @@ static int process_events(struct perf_evlist *evlist,
 while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
 cnt += 1;
 ret = add_event(evlist, &events, event);
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 if (ret < 0)
 goto out_free_nodes;
 }
@@ -120,7 +120,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 if (event->header.type == PERF_RECORD_EXIT)
 nr_exit++;
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 }
 perf_mmap__read_done(md);
@@ -118,7 +118,7 @@ void perf_mmap__put(struct perf_mmap *map)
 perf_mmap__munmap(map);
 }
-void perf_mmap__consume(struct perf_mmap *map, bool overwrite __maybe_unused)
+void perf_mmap__consume(struct perf_mmap *map)
 {
 if (!map->overwrite) {
 u64 old = map->prev;
@@ -260,7 +260,7 @@ int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
 WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 md->prev = head;
-perf_mmap__consume(md, overwrite);
+perf_mmap__consume(md);
 return -EAGAIN;
 }
@@ -314,7 +314,7 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
 }
 md->prev = head;
-perf_mmap__consume(md, md->overwrite);
+perf_mmap__consume(md);
 out:
 return rc;
 }
@@ -66,7 +66,7 @@ void perf_mmap__munmap(struct perf_mmap *map);
 void perf_mmap__get(struct perf_mmap *map);
 void perf_mmap__put(struct perf_mmap *map);
-void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
+void perf_mmap__consume(struct perf_mmap *map);
 static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
 {
@@ -1013,7 +1013,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 err = perf_evsel__parse_sample(evsel, event, &pevent->sample);
 /* Consume the even only after we parsed it out. */
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
 if (err)
 return PyErr_Format(PyExc_OSError,