Commit de506eec authored by Linus Torvalds

Merge tag 'perf_urgent_for_v6.2_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fix from Borislav Petkov:

 - Lock the proper critical section when dealing with perf event context

* tag 'perf_urgent_for_v6.2_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Fix perf_event_pmu_context serialization
parents 837c07cf 4f64a6c9
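
The fix being pulled (4f64a6c9, "perf: Fix perf_event_pmu_context serialization") closes a race between find_get_pmu_context() and put_pmu_ctx(). As the hunks below show, the lookup side used to test epc->ctx and bump epc->refcount without holding ctx->lock across the whole sequence, while the put side dropped the refcount to zero before taking ctx->lock to unlink the epc; in that window a lookup could re-reference an epc already being torn down. The fix widens the lookup's critical section and introduces atomic_dec_and_raw_lock_irqsave(), a raw-spinlock variant of the existing dec-and-lock helpers, so the final decrement and the lock acquisition happen as one atomic step.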
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -476,6 +476,15 @@ extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
 #define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
 		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
 
+extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
+#define atomic_dec_and_raw_lock(atomic, lock) \
+		__cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
+
+extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+					    unsigned long *flags);
+#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
+		__cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
+
 int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
 			     size_t max_size, unsigned int cpu_mult,
 			     gfp_t gfp, const char *name,
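
These declarations mirror the atomic_dec_and_lock()/atomic_dec_and_lock_irqsave() pair directly above them, but for a raw_spinlock_t: the helper returns nonzero only when the decrement hit zero, in which case it returns with the lock held (and, for the irqsave variant, interrupts saved in flags). A minimal usage sketch of that contract (my_obj and my_obj_put() are hypothetical names for illustration, not part of this commit):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	atomic_t refcount;
	raw_spinlock_t *list_lock;	/* protects 'entry' linkage */
	struct list_head entry;
};

static void my_obj_put(struct my_obj *obj)
{
	unsigned long flags;

	/*
	 * Nonzero return means the count reached zero AND the lock is
	 * now held with interrupts saved in 'flags'; otherwise the
	 * object is still referenced and the lock was never taken.
	 */
	if (!atomic_dec_and_raw_lock_irqsave(&obj->refcount, obj->list_lock, flags))
		return;

	/* Last reference: unlink before anyone else can find the object. */
	list_del_init(&obj->entry);
	raw_spin_unlock_irqrestore(obj->list_lock, flags);
	kfree(obj);
}

The asymmetric contract is the point: callers pay for the lock only on the final reference, and teardown then starts with the lock already held, so no other CPU can observe a zero-refcount object still on the list.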
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4813,19 +4813,17 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
 		cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
 		epc = &cpc->epc;
-
+		raw_spin_lock_irq(&ctx->lock);
 		if (!epc->ctx) {
 			atomic_set(&epc->refcount, 1);
 			epc->embedded = 1;
-			raw_spin_lock_irq(&ctx->lock);
 			list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
 			epc->ctx = ctx;
-			raw_spin_unlock_irq(&ctx->lock);
 		} else {
 			WARN_ON_ONCE(epc->ctx != ctx);
 			atomic_inc(&epc->refcount);
 		}
+		raw_spin_unlock_irq(&ctx->lock);
 
 		return epc;
 	}
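
This hunk widens the critical section: previously only the list insertion in the !epc->ctx branch ran under ctx->lock, so the else branch could atomic_inc() a refcount that a racing put_pmu_ctx() had just dropped to zero. Taking the lock before the check and releasing it after either branch serializes the whole check-then-act sequence against teardown. A self-contained userspace illustration of the same pattern, with a pthread mutex standing in for the raw spinlock and made-up names (cached_slot, lookup_or_get()):

#include <pthread.h>
#include <stdbool.h>

/* A refcounted cache slot; 'valid' plays the role of epc->ctx. */
struct cached_slot {
	bool valid;
	int refcount;
};

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cached_slot slot;

/* Initialize the slot or take another reference, doing the check and
 * the update inside one critical section, as the fixed kernel code does. */
static void lookup_or_get(void)
{
	pthread_mutex_lock(&slot_lock);
	if (!slot.valid) {
		slot.valid = true;
		slot.refcount = 1;
	} else {
		slot.refcount++;	/* safe: the final put uses the same lock */
	}
	pthread_mutex_unlock(&slot_lock);
}

int main(void)
{
	lookup_or_get();	/* initializes: refcount == 1 */
	lookup_or_get();	/* re-references: refcount == 2 */
	return slot.refcount == 2 ? 0 : 1;
}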
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4896,33 +4894,30 @@ static void free_epc_rcu(struct rcu_head *head)
 static void put_pmu_ctx(struct perf_event_pmu_context *epc)
 {
+	struct perf_event_context *ctx = epc->ctx;
 	unsigned long flags;
 
-	if (!atomic_dec_and_test(&epc->refcount))
+	/*
+	 * XXX
+	 *
+	 * lockdep_assert_held(&ctx->mutex);
+	 *
+	 * can't because of the call-site in _free_event()/put_event()
+	 * which isn't always called under ctx->mutex.
+	 */
+	if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
 		return;
 
-	if (epc->ctx) {
-		struct perf_event_context *ctx = epc->ctx;
-
-		/*
-		 * XXX
-		 *
-		 * lockdep_assert_held(&ctx->mutex);
-		 *
-		 * can't because of the call-site in _free_event()/put_event()
-		 * which isn't always called under ctx->mutex.
-		 */
-		WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
-
-		raw_spin_lock_irqsave(&ctx->lock, flags);
-		list_del_init(&epc->pmu_ctx_entry);
-		epc->ctx = NULL;
-		raw_spin_unlock_irqrestore(&ctx->lock, flags);
-	}
+	WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
+
+	list_del_init(&epc->pmu_ctx_entry);
+	epc->ctx = NULL;
 
 	WARN_ON_ONCE(!list_empty(&epc->pinned_active));
 	WARN_ON_ONCE(!list_empty(&epc->flexible_active));
 
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
 	if (epc->embedded)
 		return;
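
The put side folds what used to be two separate steps, atomic_dec_and_test() followed by raw_spin_lock_irqsave(), into one atomic_dec_and_raw_lock_irqsave() call. With the old ordering, a lookup on another CPU could run after the refcount hit zero but before the lock was taken, find the epc still on ctx->pmu_ctx_list, and take a reference to an object already on its way to free_epc_rcu(). Note also that epc->ctx is now loaded into a local at entry, since the function clears epc->ctx under the lock.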
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -49,3 +49,34 @@ int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
 	return 0;
 }
 EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
+
+int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
+	raw_spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	raw_spin_unlock(lock);
+	return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
+
+int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+				     unsigned long *flags)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
+	raw_spin_lock_irqsave(lock, *flags);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	raw_spin_unlock_irqrestore(lock, *flags);
+	return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
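
Both helpers reuse the structure of the existing _atomic_dec_and_lock(): atomic_add_unless(atomic, -1, 1) decrements unless that would drop the counter to zero, so non-final references never touch the lock, and a possibly final reference falls through to a slow path that re-checks under the lock, because another CPU may have taken a new reference in between. A userspace analogue of the same two-phase logic, using C11 atomics and a pthread mutex (dec_and_lock() is a stand-in name, not the kernel function), sketched under those assumptions:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Decrement *cnt; return 1 with 'lock' held iff the count reached zero.
 * Mirrors the fast-path/slow-path split of _atomic_dec_and_raw_lock(). */
static int dec_and_lock(atomic_int *cnt)
{
	int old = atomic_load(cnt);

	/* Fast path: decrement unless that would drop the count to 0. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return 0;	/* not the last reference, lock untouched */
	}

	/* Slow path: maybe the last reference; decide under the lock,
	 * re-checking because another thread may have re-referenced. */
	pthread_mutex_lock(&lock);
	if (atomic_fetch_sub(cnt, 1) == 1)
		return 1;		/* reached zero; caller must unlock */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	atomic_int refs = 2;

	printf("%d\n", dec_and_lock(&refs));	/* 0: refs 2 -> 1 */
	printf("%d\n", dec_and_lock(&refs));	/* 1: refs 1 -> 0, lock held */
	pthread_mutex_unlock(&lock);
	return 0;
}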