Commit a7fc726b authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:
 "A set of perf related fixes:

   - fix a CR4.PCE propagation issue caused by using mm instead of
     active_mm, which propagated the wrong value.

   - perf core fixes, which plug a use-after-free issue and make the
     event inheritance on fork more robust.

   - a tooling fix for symbol handling"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf symbols: Fix symbols__fixup_end heuristic for corner cases
  x86/perf: Clarify why x86_pmu_event_mapped() isn't racy
  x86/perf: Fix CR4.PCE propagation to use active_mm instead of mm
  perf/core: Better explain the inherit magic
  perf/core: Simplify perf_event_free_task()
  perf/core: Fix event inheritance on fork()
  perf/core: Fix use-after-free in perf_release()
parents cd21debe a01851fa
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
 
+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm. Otherwise one task could observe
+	 * perf_rdpmc_allowed > 1 and return all the way back to
+	 * userspace with CR4.PCE clear while another task is still
+	 * doing on_each_cpu_mask() to propagate CR4.PCE.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write. If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
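For context on what CR4.PCE gates: once a task mmap()s a counter, the kernel marks the event PERF_X86_EVENT_RDPMC_ALLOWED and sets CR4.PCE for that mm so user space can read the counter directly with the RDPMC instruction, and the IPI handler must consult active_mm because the interrupted task may be a kernel thread whose mm is NULL. The self-monitoring sketch below is illustrative only, not part of the patch; it assumes x86-64 Linux, a permissive perf_event_paranoid setting, and it omits the pc->lock seqcount retry loop that robust readers should use.

/* Illustrative only: read a hardware counter from user space via RDPMC.
 * The instruction only works while the kernel has CR4.PCE set for this mm,
 * which is what the hunk above propagates correctly via active_mm.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;
	asm volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	struct perf_event_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;

	/* Self-monitoring event; there is no glibc wrapper for this syscall. */
	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* Mapping the event is what triggers x86_pmu_event_mapped(). */
	struct perf_event_mmap_page *pc =
		mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED)
		return 1;

	if (pc->cap_user_rdpmc && pc->index) {
		/* pc->index is the hardware counter number plus one. */
		uint64_t count = rdpmc(pc->index - 1) + pc->offset;
		printf("instructions so far: %llu\n", (unsigned long long)count);
	}

	munmap(pc, sysconf(_SC_PAGESIZE));
	close(fd);
	return 0;
}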
@@ -4256,7 +4256,7 @@ int perf_event_release_kernel(struct perf_event *event)
 
 	raw_spin_lock_irq(&ctx->lock);
 	/*
-	 * Mark this even as STATE_DEAD, there is no external reference to it
+	 * Mark this event as STATE_DEAD, there is no external reference to it
 	 * anymore.
 	 *
 	 * Anybody acquiring event->child_mutex after the below loop _must_
@@ -10417,21 +10417,22 @@ void perf_event_free_task(struct task_struct *task)
 			continue;
 
 		mutex_lock(&ctx->mutex);
-again:
-		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
-				group_entry)
-			perf_free_event(event, ctx);
+		raw_spin_lock_irq(&ctx->lock);
+		/*
+		 * Destroy the task <-> ctx relation and mark the context dead.
+		 *
+		 * This is important because even though the task hasn't been
+		 * exposed yet the context has been (through child_list).
+		 */
+		RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+		WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+		put_task_struct(task); /* cannot be last */
+		raw_spin_unlock_irq(&ctx->lock);
 
-		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-				group_entry)
+		list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
 			perf_free_event(event, ctx);
 
-		if (!list_empty(&ctx->pinned_groups) ||
-				!list_empty(&ctx->flexible_groups))
-			goto again;
-
 		mutex_unlock(&ctx->mutex);
 		put_ctx(ctx);
 	}
 }
@@ -10469,7 +10470,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 }
 
 /*
- * inherit a event from parent task to child task:
+ * Inherit a event from parent task to child task.
+ *
+ * Returns:
+ *  - valid pointer on success
+ *  - NULL for orphaned events
+ *  - IS_ERR() on error
  */
 static struct perf_event *
 inherit_event(struct perf_event *parent_event,
@@ -10563,6 +10569,16 @@ inherit_event(struct perf_event *parent_event,
 	return child_event;
 }
 
+/*
+ * Inherits an event group.
+ *
+ * This will quietly suppress orphaned events; !inherit_event() is not an error.
+ * This matches with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int inherit_group(struct perf_event *parent_event,
 	      struct task_struct *parent,
 	      struct perf_event_context *parent_ctx,
@@ -10577,6 +10593,11 @@ static int inherit_group(struct perf_event *parent_event,
 				 child, NULL, child_ctx);
 	if (IS_ERR(leader))
 		return PTR_ERR(leader);
+	/*
+	 * @leader can be NULL here because of is_orphaned_event(). In this
+	 * case inherit_event() will create individual events, similar to what
+	 * perf_group_detach() would do anyway.
+	 */
 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
 		child_ctr = inherit_event(sub, parent, parent_ctx,
 					  child, leader, child_ctx);
@@ -10586,6 +10607,17 @@ static int inherit_group(struct perf_event *parent_event,
 	return 0;
 }
 
+/*
+ * Creates the child task context and tries to inherit the event-group.
+ *
+ * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
+ * inherited_all set when we 'fail' to inherit an orphaned event; this is
+ * consistent with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		   struct perf_event_context *parent_ctx,
@@ -10608,7 +10640,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 	 * First allocate and initialize a context for the
 	 * child.
 	 */
-
 	child_ctx = alloc_perf_context(parent_ctx->pmu, child);
 	if (!child_ctx)
 		return -ENOMEM;
@@ -10670,7 +10701,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	/*
@@ -10686,7 +10717,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10714,6 +10745,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 	}
 
 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
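The inheritance hunks above concern counters opened with attr.inherit set: such a counter is cloned into tasks forked after it is created, and the children's counts are folded back into the value read from the parent's file descriptor. A minimal user-space illustration of that semantics follows; it is not taken from the patch and assumes a Linux system where perf_event_open() is permitted for the calling user.

/* Illustrative only: a counter opened with attr.inherit = 1 also counts in
 * children created after the counter exists; reading the parent fd returns
 * the parent's count plus the counts merged back from exited children.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.inherit = 1;          /* clone the counter into future children */
	attr.exclude_kernel = 1;
	attr.disabled = 1;

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	pid_t pid = fork();
	if (pid == 0) {
		/* Child: burn some instructions, then exit; its inherited
		 * counter's value is merged back into the parent event. */
		volatile unsigned long x = 0;
		for (int i = 0; i < 1000000; i++)
			x += i;
		_exit(0);
	}

	waitpid(pid, NULL, 0);
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	uint64_t count;
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("parent + exited children: %llu instructions\n",
		       (unsigned long long)count);

	close(fd);
	return 0;
}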
@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
 
 	/* Last entry */
 	if (curr->end == curr->start)
-		curr->end = roundup(curr->start, 4096);
+		curr->end = roundup(curr->start, 4096) + 4096;
 }
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
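The tooling fix touches the heuristic that assigns an end address to zero-sized symbols: each symbol is extended up to the start of the next symbol, and the last symbol is extended to a page boundary. The corner case is a last symbol whose start is already page aligned, where roundup(start, 4096) == start and the symbol stayed zero-sized; adding 4096 keeps it non-empty. The sketch below is a simplified standalone model of that heuristic (a plain sorted array instead of perf's rbtree), not the actual tooling code.

/* Simplified model of perf's symbols__fixup_end() heuristic: symbols come
 * from a symbol table sorted by start address, some with unknown (zero)
 * size.  Give every symbol an end address so address lookups can work.
 */
#include <stdint.h>
#include <stdio.h>

struct sym {
	uint64_t start;
	uint64_t end;    /* end == start means "size unknown" */
	const char *name;
};

#define PAGE_SZ 4096ULL

static uint64_t roundup_page(uint64_t addr)
{
	return (addr + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
}

static void fixup_end(struct sym *syms, int n)
{
	/* Zero-sized symbols are extended to the next symbol's start. */
	for (int i = 0; i + 1 < n; i++)
		if (syms[i].end == syms[i].start)
			syms[i].end = syms[i + 1].start;

	/* Last entry: no "next symbol", so extend to a page boundary.
	 * Adding PAGE_SZ (the fix above) keeps it non-empty even when the
	 * start address is already page aligned. */
	if (n && syms[n - 1].end == syms[n - 1].start)
		syms[n - 1].end = roundup_page(syms[n - 1].start) + PAGE_SZ;
}

int main(void)
{
	struct sym syms[] = {
		{ 0x1000, 0x1000, "a" },   /* size unknown                */
		{ 0x1840, 0x1840, "b" },
		{ 0x3000, 0x3000, "c" },   /* last, page-aligned start    */
	};
	int n = sizeof(syms) / sizeof(syms[0]);

	fixup_end(syms, n);
	for (int i = 0; i < n; i++)
		printf("%s: [%#llx, %#llx)\n", syms[i].name,
		       (unsigned long long)syms[i].start,
		       (unsigned long long)syms[i].end);
	return 0;
}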