Commit b9bae2c8 authored by Kan Liang, committed by Arnaldo Carvalho de Melo

perf mmap: Simplify perf_mmap__read_init()

It isn't necessary to pass the 'start', 'end' and 'overwrite' arguments
to perf_mmap__read_init().  The data is stored in the struct perf_mmap.

Discard the parameters.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Suggested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1520350567-80082-8-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 0019dc87
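For illustration only, a minimal sketch of the consumer pattern after this change, based on the calls visible in the diff below; the helper name drain_mmap() and the include paths are assumptions, not part of the patch:

/* Sketch only: helper name and include paths are assumed, not from the patch. */
#include "util/mmap.h"	/* perf_mmap__read_init/read_event/read_done */
#include "util/event.h"	/* union perf_event */

static void drain_mmap(struct perf_mmap *md)
{
	union perf_event *event;

	/*
	 * Previously the caller passed an 'overwrite' flag plus &start/&end
	 * locals that only mirrored md->overwrite, md->start and md->end.
	 * Now the ring-buffer boundaries are kept in struct perf_mmap itself.
	 */
	if (perf_mmap__read_init(md) < 0)
		return;

	while ((event = perf_mmap__read_event(md)) != NULL) {
		/* ... process 'event' here ... */
	}

	perf_mmap__read_done(md);
}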
@@ -61,7 +61,6 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
 	u64 test_tsc, comm1_tsc, comm2_tsc;
 	u64 test_time, comm1_time = 0, comm2_time = 0;
 	struct perf_mmap *md;
-	u64 end, start;
 	threads = thread_map__new(-1, getpid(), UINT_MAX);
 	CHECK_NOT_NULL__(threads);
@@ -112,7 +111,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md, false, &start, &end) < 0)
+		if (perf_mmap__read_init(md) < 0)
 			continue;
 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -746,14 +746,13 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
 	struct perf_evlist *evlist = kvm->evlist;
 	union perf_event *event;
 	struct perf_mmap *md;
-	u64 end, start;
 	u64 timestamp;
 	s64 n = 0;
 	int err;
 	*mmap_time = ULLONG_MAX;
 	md = &evlist->mmap[idx];
-	err = perf_mmap__read_init(md, false, &start, &end);
+	err = perf_mmap__read_init(md);
 	if (err < 0)
 		return (err == -EAGAIN) ? 0 : -1;
@@ -817,11 +817,10 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 	struct perf_session *session = top->session;
 	union perf_event *event;
 	struct machine *machine;
-	u64 end, start;
 	int ret;
 	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
-	if (perf_mmap__read_init(md, opts->overwrite, &start, &end) < 0)
+	if (perf_mmap__read_init(md) < 0)
 		return;
 	while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -2503,10 +2503,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		union perf_event *event;
 		struct perf_mmap *md;
-		u64 end, start;
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md, false, &start, &end) < 0)
+		if (perf_mmap__read_init(md) < 0)
 			continue;
 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -33,9 +33,8 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		struct perf_mmap *map = &evlist->overwrite_mmap[i];
 		union perf_event *event;
-		u64 start, end;
-		perf_mmap__read_init(map, true, &start, &end);
+		perf_mmap__read_init(map);
 		while ((event = perf_mmap__read_event(map)) != NULL) {
 			const u32 type = event->header.type;
@@ -177,10 +177,9 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		union perf_event *event;
 		struct perf_mmap *md;
-		u64 end, start;
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md, false, &start, &end) < 0)
+		if (perf_mmap__read_init(md) < 0)
 			continue;
 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -410,12 +410,11 @@ static int process_events(struct machine *machine, struct perf_evlist *evlist,
 {
 	union perf_event *event;
 	struct perf_mmap *md;
-	u64 end, start;
 	int i, ret;
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md, false, &start, &end) < 0)
+		if (perf_mmap__read_init(md) < 0)
 			continue;
 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -28,13 +28,12 @@ static int find_comm(struct perf_evlist *evlist, const char *comm)
 {
 	union perf_event *event;
 	struct perf_mmap *md;
-	u64 end, start;
 	int i, found;
 	found = 0;
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md, false, &start, &end) < 0)
+		if (perf_mmap__read_init(md) < 0)
 			continue;
 		while ((event = perf_mmap__read_event(md)) != NULL) {
 			if (event->header.type == PERF_RECORD_COMM &&
@@ -39,7 +39,6 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
 	struct perf_evsel *evsels[nsyscalls], *evsel;
 	char sbuf[STRERR_BUFSIZE];
 	struct perf_mmap *md;
-	u64 end, start;
 	threads = thread_map__new(-1, getpid(), UINT_MAX);
 	if (threads == NULL) {
@@ -109,7 +108,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
 	}
 	md = &evlist->mmap[0];
-	if (perf_mmap__read_init(md, false, &start, &end) < 0)
+	if (perf_mmap__read_init(md) < 0)
 		goto out_init;
 	while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -87,10 +87,9 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		union perf_event *event;
 		struct perf_mmap *md;
-		u64 end, start;
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md, false, &start, &end) < 0)
+		if (perf_mmap__read_init(md) < 0)
 			continue;
 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -165,10 +165,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		union perf_event *event;
 		struct perf_mmap *md;
-		u64 end, start;
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md, false, &start, &end) < 0)
+		if (perf_mmap__read_init(md) < 0)
 			continue;
 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -40,7 +40,6 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 	struct cpu_map *cpus;
 	struct thread_map *threads;
 	struct perf_mmap *md;
-	u64 end, start;
 	attr.sample_freq = 500;
@@ -96,7 +95,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 	perf_evlist__disable(evlist);
 	md = &evlist->mmap[0];
-	if (perf_mmap__read_init(md, false, &start, &end) < 0)
+	if (perf_mmap__read_init(md) < 0)
 		goto out_init;
 	while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -259,12 +259,11 @@ static int process_events(struct perf_evlist *evlist,
 	LIST_HEAD(events);
 	struct event_node *events_array, *node;
 	struct perf_mmap *md;
-	u64 end, start;
 	int i, ret;
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md, false, &start, &end) < 0)
+		if (perf_mmap__read_init(md) < 0)
 			continue;
 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -48,7 +48,6 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 	struct cpu_map *cpus;
 	struct thread_map *threads;
 	struct perf_mmap *md;
-	u64 end, start;
 	signal(SIGCHLD, sig_handler);
@@ -113,7 +112,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 retry:
 	md = &evlist->mmap[0];
-	if (perf_mmap__read_init(md, false, &start, &end) < 0)
+	if (perf_mmap__read_init(md) < 0)
 		goto out_init;
 	while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -235,16 +235,13 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u6
 /*
  * Report the start and end of the available data in ringbuffer
  */
-int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
-			 u64 *startp, u64 *endp)
+int perf_mmap__read_init(struct perf_mmap *md)
 {
 	u64 head = perf_mmap__read_head(md);
 	u64 old = md->prev;
 	unsigned char *data = md->base + page_size;
 	unsigned long size;
-	*startp = overwrite ? head : old;
-	*endp = overwrite ? old : head;
 	md->start = md->overwrite ? head : old;
 	md->end = md->overwrite ? old : head;
@@ -267,8 +264,6 @@ int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
 		 */
 		if (overwrite_rb_find_range(data, md->mask, head, &md->start, &md->end))
 			return -EINVAL;
-		*startp = md->start;
-		*endp = md->end;
 	}
 	return 0;
@@ -278,13 +273,12 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
 		    int push(void *to, void *buf, size_t size))
 {
 	u64 head = perf_mmap__read_head(md);
-	u64 end, start;
 	unsigned char *data = md->base + page_size;
 	unsigned long size;
 	void *buf;
 	int rc = 0;
-	rc = perf_mmap__read_init(md, md->overwrite, &start, &end);
+	rc = perf_mmap__read_init(md);
 	if (rc < 0)
 		return (rc == -EAGAIN) ? 0 : -1;
@@ -96,7 +96,6 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
 size_t perf_mmap__mmap_len(struct perf_mmap *map);
-int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
-			 u64 *startp, u64 *endp);
+int perf_mmap__read_init(struct perf_mmap *md);
 void perf_mmap__read_done(struct perf_mmap *map);
 #endif /*__PERF_MMAP_H */
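For context, a condensed (not verbatim) excerpt of the struct perf_mmap members that perf_mmap__read_init() now consults; only the field names used in the patch above are taken from the source, the comments and the omitted members are illustrative:

/* Sketch only: partial excerpt, not the full definition from util/mmap.h. */
struct perf_mmap {
	void	*base;		/* mmap'ed ring-buffer data area */
	int	 mask;		/* data size minus one, used for wrapping */
	u64	 prev;		/* position after the last completed read */
	u64	 start;		/* start of available data, set by read_init() */
	u64	 end;		/* end of available data, set by read_init() */
	bool	 overwrite;	/* backward/overwrite mode */
	/* ... remaining members omitted in this sketch ... */
};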
@@ -984,7 +984,6 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 	int sample_id_all = 1, cpu;
 	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
 	struct perf_mmap *md;
-	u64 end, start;
 	int err;
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
@@ -992,7 +991,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 		return NULL;
 	md = &evlist->mmap[cpu];
-	if (perf_mmap__read_init(md, false, &start, &end) < 0)
+	if (perf_mmap__read_init(md) < 0)
 		goto end;
 	event = perf_mmap__read_event(md);