Commit e2039887 authored by Linus Torvalds

Merge tag 'perf_urgent_for_v6.11_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Borislav Petkov:

 - Fix perf's AUX buffer serialization

 - Prevent uninitialized struct members in perf's uprobes handling

* tag 'perf_urgent_for_v6.11_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/aux: Fix AUX buffer serialization
  uprobes: Use kzalloc to allocate xol area
parents 5dadc1be 2ab9d830
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1255,8 +1255,9 @@ static void put_ctx(struct perf_event_context *ctx)
  *	  perf_event_context::mutex
  *	    perf_event::child_mutex;
  *	  perf_event_context::lock
- *	  perf_event::mmap_mutex
  *	  mmap_lock
+ *	    perf_event::mmap_mutex
+ *	    perf_buffer::aux_mutex
  *	  perf_addr_filters_head::lock
  *
  *    cpu_hotplug_lock
@@ -6373,12 +6374,11 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 		event->pmu->event_unmapped(event, vma->vm_mm);
 
 	/*
-	 * rb->aux_mmap_count will always drop before rb->mmap_count and
-	 * event->mmap_count, so it is ok to use event->mmap_mutex to
-	 * serialize with perf_mmap here.
+	 * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
+	 * to avoid complications.
 	 */
 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
-	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
+	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
 		/*
 		 * Stop all AUX events that are writing to this buffer,
 		 * so that we can free its AUX pages and corresponding PMU
@@ -6395,7 +6395,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 		rb_free_aux(rb);
 		WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
 
-		mutex_unlock(&event->mmap_mutex);
+		mutex_unlock(&rb->aux_mutex);
 	}
 
 	if (atomic_dec_and_test(&rb->mmap_count))
@@ -6483,6 +6483,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	struct perf_event *event = file->private_data;
 	unsigned long user_locked, user_lock_limit;
 	struct user_struct *user = current_user();
+	struct mutex *aux_mutex = NULL;
 	struct perf_buffer *rb = NULL;
 	unsigned long locked, lock_limit;
 	unsigned long vma_size;
@@ -6531,6 +6532,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		if (!rb)
 			goto aux_unlock;
 
+		aux_mutex = &rb->aux_mutex;
+		mutex_lock(aux_mutex);
+
 		aux_offset = READ_ONCE(rb->user_page->aux_offset);
 		aux_size = READ_ONCE(rb->user_page->aux_size);
@@ -6681,6 +6685,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		atomic_dec(&rb->mmap_count);
 	}
 aux_unlock:
+	if (aux_mutex)
+		mutex_unlock(aux_mutex);
 	mutex_unlock(&event->mmap_mutex);
 
 	/*
......
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -40,6 +40,7 @@ struct perf_buffer {
 	struct user_struct		*mmap_user;
 
 	/* AUX area */
+	struct mutex			aux_mutex;
 	long				aux_head;
 	unsigned int			aux_nest;
 	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
......
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -337,6 +337,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
 	 */
 	if (!rb->nr_pages)
 		rb->paused = 1;
+
+	mutex_init(&rb->aux_mutex);
 }
 
 void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
......
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1489,7 +1489,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
 	struct xol_area *area;
 	void *insns;
 
-	area = kmalloc(sizeof(*area), GFP_KERNEL);
+	area = kzalloc(sizeof(*area), GFP_KERNEL);
 	if (unlikely(!area))
 		goto out;
 
@@ -1499,7 +1499,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
 		goto free_area;
 
 	area->xol_mapping.name = "[uprobes]";
-	area->xol_mapping.fault = NULL;
 	area->xol_mapping.pages = area->pages;
 
 	area->pages[0] = alloc_page(GFP_HIGHUSER);
 	if (!area->pages[0])
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment