Commit 80e53d11 authored by Jiri Olsa, committed by Arnaldo Carvalho de Melo

libperf: Adopt perf_mmap__put() function from tools/perf

Move perf_mmap__put() from tools/perf to libperf.

Once perf_mmap__put() is moved, we need a way to call application-specific
unmap code (the AIO and AUX area code in perf) when the map goes away.

Add the perf_mmap::unmap_cb callback to do that.

The unmap path from perf is:

  perf_mmap__put                           (libperf)
    perf_mmap__munmap                      (libperf)
      map->unmap_cb -> perf_mmap__unmap_cb (perf)
        mmap__munmap                       (perf)
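
As a rough illustration of this wiring, here is a minimal sketch; the wrapper
struct, my_unmap_cb() and the priv pointer below are hypothetical stand-ins
for perf's 'struct mmap', perf_mmap__unmap_cb() and mmap__munmap(); only the
libperf API itself is real:

  #include <stdlib.h>
  #include <stdbool.h>
  #include <linux/kernel.h>       /* container_of() */
  #include <internal/mmap.h>      /* struct perf_mmap, libperf_unmap_cb_t */

  struct my_mmap {
          struct perf_mmap core;  /* embedded libperf state */
          void *priv;             /* application state released in the callback */
  };

  static void my_unmap_cb(struct perf_mmap *map)
  {
          struct my_mmap *m = container_of(map, struct my_mmap, core);

          free(m->priv);          /* application-side teardown */
  }

  static void my_mmap__init(struct my_mmap *m, bool overwrite)
  {
          perf_mmap__init(&m->core, overwrite, my_unmap_cb);
  }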

Committer notes:

Add missing linux/kernel.h to tools/perf/lib/mmap.c to get the BUG_ON
definition.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20191007125344.14268-8-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 59d7ea62
@@ -197,7 +197,7 @@ static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
                  * every aio write request started in record__aio_push() so
                  * decrement it because the request is now complete.
                  */
-                perf_mmap__put(md);
+                perf_mmap__put(&md->core);
                 rc = 1;
         } else {
                 /*
@@ -332,7 +332,7 @@ static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
                  * map->refcount is decremented in record__aio_complete() after
                  * aio write operation finishes successfully.
                  */
-                perf_mmap__put(map);
+                perf_mmap__put(&map->core);
         }

         return ret;
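
The two hunks above follow a reference-per-in-flight-write pattern. A
simplified sketch of that pattern, where push() and complete() are
hypothetical stand-ins for record__aio_push() and record__aio_complete(),
and the perf/libperf headers are assumed to be on the include path:

  #include <aio.h>
  #include <internal/mmap.h>   /* perf_mmap__get()/perf_mmap__put() */
  #include "util/mmap.h"       /* perf's struct mmap wrapper */

  static int push(struct mmap *map, struct aiocb *cblock)
  {
          int ret;

          perf_mmap__get(&map->core);          /* pin the ring for the write */
          ret = aio_write(cblock);
          if (ret < 0)
                  perf_mmap__put(&map->core);  /* write never started, drop it */
          return ret;
  }

  static void complete(struct mmap *map)
  {
          perf_mmap__put(&map->core);          /* write finished, drop the ref */
  }
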
@@ -10,23 +10,28 @@
 /* perf sample has 16 bits size limit */
 #define PERF_SAMPLE_MAX_SIZE (1 << 16)

+struct perf_mmap;
+
+typedef void (*libperf_unmap_cb_t)(struct perf_mmap *map);
+
 /**
  * struct perf_mmap - perf's ring buffer mmap details
  *
  * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
  */
 struct perf_mmap {
-        void               *base;
-        int                 mask;
-        int                 fd;
-        int                 cpu;
-        refcount_t          refcnt;
-        u64                 prev;
-        u64                 start;
-        u64                 end;
-        bool                overwrite;
-        u64                 flush;
-        char                event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+        void                       *base;
+        int                         mask;
+        int                         fd;
+        int                         cpu;
+        refcount_t                  refcnt;
+        u64                         prev;
+        u64                         start;
+        u64                         end;
+        bool                        overwrite;
+        u64                         flush;
+        libperf_unmap_cb_t          unmap_cb;
+        char                        event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
 };

 struct perf_mmap_param {
@@ -36,10 +41,12 @@ struct perf_mmap_param {

 size_t perf_mmap__mmap_len(struct perf_mmap *map);

-void perf_mmap__init(struct perf_mmap *map, bool overwrite);
+void perf_mmap__init(struct perf_mmap *map, bool overwrite,
+                     libperf_unmap_cb_t unmap_cb);
 int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
                     int fd, int cpu);
 void perf_mmap__munmap(struct perf_mmap *map);
 void perf_mmap__get(struct perf_mmap *map);
+void perf_mmap__put(struct perf_mmap *map);

 #endif /* __LIBPERF_INTERNAL_MMAP_H */
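
Note that perf_mmap__munmap() only invokes the callback when one was
registered (see the NULL check further down), so a libperf user with no
extra teardown can simply pass NULL:

  perf_mmap__init(&map, false, NULL);  /* no callback: plain munmap only */
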
@@ -2,11 +2,14 @@
 #include <sys/mman.h>
 #include <internal/mmap.h>
 #include <internal/lib.h>
+#include <linux/kernel.h>

-void perf_mmap__init(struct perf_mmap *map, bool overwrite)
+void perf_mmap__init(struct perf_mmap *map, bool overwrite,
+                     libperf_unmap_cb_t unmap_cb)
 {
         map->fd = -1;
         map->overwrite = overwrite;
+        map->unmap_cb = unmap_cb;
         refcount_set(&map->refcnt, 0);
 }
@@ -40,9 +43,19 @@ void perf_mmap__munmap(struct perf_mmap *map)
                 map->fd = -1;
                 refcount_set(&map->refcnt, 0);
         }
+        if (map && map->unmap_cb)
+                map->unmap_cb(map);
 }

 void perf_mmap__get(struct perf_mmap *map)
 {
         refcount_inc(&map->refcnt);
 }
+
+void perf_mmap__put(struct perf_mmap *map)
+{
+        BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
+
+        if (refcount_dec_and_test(&map->refcnt))
+                perf_mmap__munmap(map);
+}
@@ -433,7 +433,7 @@ static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
         struct mmap *map = fda->priv[fd].ptr;

         if (map)
-                perf_mmap__put(map);
+                perf_mmap__put(&map->core);
 }

 int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
@@ -601,11 +601,11 @@ static void evlist__munmap_nofree(struct evlist *evlist)
         if (evlist->mmap)
                 for (i = 0; i < evlist->core.nr_mmaps; i++)
-                        mmap__munmap(&evlist->mmap[i]);
+                        perf_mmap__munmap(&evlist->mmap[i].core);

         if (evlist->overwrite_mmap)
                 for (i = 0; i < evlist->core.nr_mmaps; i++)
-                        mmap__munmap(&evlist->overwrite_mmap[i]);
+                        perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
 }

 void evlist__munmap(struct evlist *evlist)
@@ -615,6 +615,13 @@ void evlist__munmap(struct evlist *evlist)
         zfree(&evlist->overwrite_mmap);
 }

+static void perf_mmap__unmap_cb(struct perf_mmap *map)
+{
+        struct mmap *m = container_of(map, struct mmap, core);
+
+        mmap__munmap(m);
+}
+
 static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
                                        bool overwrite)
 {
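
The container_of() above works because perf embeds the libperf state as the
'core' member of its own wrapper struct; abridged here to the members visible
in this patch (the full definition lives in perf's util/mmap.h):

  struct mmap {
          struct perf_mmap      core;           /* libperf ring buffer state */
          struct auxtrace_mmap  auxtrace_mmap;  /* AUX area state */
          void                  *data;          /* extra buffer, unmapped in mmap__munmap() */
          /* ... AIO state etc. omitted ... */
  };
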
@@ -638,7 +645,7 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
                  * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
                  * thus does perf_mmap__get() on it.
                  */
-                perf_mmap__init(&map[i].core, overwrite);
+                perf_mmap__init(&map[i].core, overwrite, perf_mmap__unmap_cb);
         }

         return map;
@@ -715,7 +722,7 @@ static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
                  */
                 if (!evsel->core.system_wide &&
                     perf_evlist__add_pollfd(&evlist->core, fd, &maps[idx], revent) < 0) {
-                        perf_mmap__put(&maps[idx]);
+                        perf_mmap__put(&maps[idx].core);
                         return -1;
                 }
@@ -110,14 +110,6 @@ static bool perf_mmap__empty(struct mmap *map)
         return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
 }

-void perf_mmap__put(struct mmap *map)
-{
-        BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);
-
-        if (refcount_dec_and_test(&map->core.refcnt))
-                mmap__munmap(map);
-}
-
 void perf_mmap__consume(struct mmap *map)
 {
         if (!map->core.overwrite) {
@@ -127,7 +119,7 @@ void perf_mmap__consume(struct mmap *map)
         }

         if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
-                perf_mmap__put(map);
+                perf_mmap__put(&map->core);
 }

 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
@@ -308,7 +300,6 @@ static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
 void mmap__munmap(struct mmap *map)
 {
-        perf_mmap__munmap(&map->core);
         perf_mmap__aio_munmap(map);
         if (map->data != NULL) {
                 munmap(map->data, mmap__mmap_len(map));
@@ -45,8 +45,6 @@ struct mmap_params {
 int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
 void mmap__munmap(struct mmap *map);

-void perf_mmap__put(struct mmap *map);
-
 void perf_mmap__consume(struct mmap *map);

 static inline u64 perf_mmap__read_head(struct mmap *mm)