Commit 45c815f0 authored by Alexander Shishkin, committed by Ingo Molnar

perf: Synchronously free aux pages in case of allocation failure

We are currently using asynchronous deallocation in the error path in
the AUX mmap code, which is unnecessary and also presents a problem for
users who wish to probe for the biggest possible buffer size they can
get: they'll get -EINVAL on all subsequent attempts to allocate a smaller
buffer before the asynchronous deallocation callback frees up the pages
from the previous unsuccessful attempt.

Currently, gdb does that when allocating AUX buffers for Intel PT traces.
More specifically, the overwrite mode of AUX PMUs that don't support
hardware scatter-gather (some implementations of Intel PT, for instance)
is limited to a single contiguous high-order allocation for its buffer,
and there is no way of knowing the largest obtainable size without
trying (see the sketch below).
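
For illustration only (this is not part of the patch), a minimal
userspace sketch of that probing pattern might look as follows. The
sysfs path, the 64 MiB starting size, the four data pages and the
attribute settings are assumptions made for the example; mapping the
AUX area read-only is what selects overwrite mode.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	size_t page_size = sysconf(_SC_PAGESIZE);
	size_t data_pages = 4;	/* data ring: 2^n pages + 1 control page */
	struct perf_event_attr attr = { 0 };
	struct perf_event_mmap_page *pc;
	size_t size;
	FILE *f;
	int fd;

	/* The intel_pt PMU type is system-specific; read it from sysfs. */
	f = fopen("/sys/bus/event_source/devices/intel_pt/type", "r");
	if (!f || fscanf(f, "%u", &attr.type) != 1)
		return 1;
	fclose(f);

	attr.size = sizeof(attr);
	attr.exclude_kernel = 1;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* Map the control page plus the ordinary data ring first. */
	pc = mmap(NULL, (data_pages + 1) * page_size,
		  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED)
		return 1;

	/* Probe: halve the AUX size until the kernel accepts it. */
	for (size = 64UL << 20; size >= page_size; size >>= 1) {
		void *aux;

		pc->aux_offset = (data_pages + 1) * page_size;
		pc->aux_size = size;

		/* A read-only AUX mapping selects overwrite mode. */
		aux = mmap(NULL, size, PROT_READ, MAP_SHARED, fd,
			   pc->aux_offset);
		if (aux != MAP_FAILED) {
			printf("AUX buffer: %zu bytes\n", size);
			break;
		}
		/*
		 * Before this patch, the pages of a failed attempt were
		 * freed asynchronously, so these retries could keep
		 * failing with EINVAL until that callback had run.
		 */
	}
	return 0;
}

With the pre-patch kernel, a loop like this could exhaust all of its
candidate sizes while the previous attempt's pages were still queued
for deallocation; with this patch, the pages are gone by the time the
failing mmap() returns, so the very next retry sees the true state.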

This patch changes the error path freeing to be synchronous, as there
won't be any contenders for the AUX pages at that point: the mapping
never succeeded, so nobody else can hold a reference to the buffer.
Reported-by: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: vince@deater.net
Link: http://lkml.kernel.org/r/1453216469-9509-1-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0e1eb0a1
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -459,6 +459,25 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
 	__free_page(page);
 }
 
+static void __rb_free_aux(struct ring_buffer *rb)
+{
+	int pg;
+
+	if (rb->aux_priv) {
+		rb->free_aux(rb->aux_priv);
+		rb->free_aux = NULL;
+		rb->aux_priv = NULL;
+	}
+
+	if (rb->aux_nr_pages) {
+		for (pg = 0; pg < rb->aux_nr_pages; pg++)
+			rb_free_aux_page(rb, pg);
+
+		kfree(rb->aux_pages);
+		rb->aux_nr_pages = 0;
+	}
+}
+
 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
 {
@@ -547,30 +566,11 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 	if (!ret)
 		rb->aux_pgoff = pgoff;
 	else
-		rb_free_aux(rb);
+		__rb_free_aux(rb);
 
 	return ret;
 }
 
-static void __rb_free_aux(struct ring_buffer *rb)
-{
-	int pg;
-
-	if (rb->aux_priv) {
-		rb->free_aux(rb->aux_priv);
-		rb->free_aux = NULL;
-		rb->aux_priv = NULL;
-	}
-
-	if (rb->aux_nr_pages) {
-		for (pg = 0; pg < rb->aux_nr_pages; pg++)
-			rb_free_aux_page(rb, pg);
-
-		kfree(rb->aux_pages);
-		rb->aux_nr_pages = 0;
-	}
-}
-
 void rb_free_aux(struct ring_buffer *rb)
 {
 	if (atomic_dec_and_test(&rb->aux_refcount))
...