Skip to content

Commit de506ee

Browse files
committed
Merge tag 'perf_urgent_for_v6.2_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fix from Borislav Petkov:

 - Lock the proper critical section when dealing with perf event context

* tag 'perf_urgent_for_v6.2_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Fix perf_event_pmu_context serialization
2 parents 837c07c + 4f64a6c commit de506ee

File tree

3 files changed

+57
-22
lines changed

3 files changed

+57
-22
lines changed

include/linux/spinlock.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -476,6 +476,15 @@ extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
476476
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
477477
__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
478478

479+
/*
 * raw_spinlock_t counterparts of atomic_dec_and_lock()/_irqsave() above:
 * decrement @atomic and, only if it dropped to zero, return true with
 * @lock held (and, for the _irqsave variant, interrupts disabled with the
 * previous state saved in @flags).
 *
 * __cond_lock() mirrors the wrapper on atomic_dec_and_lock_irqsave() above;
 * it annotates for sparse that the lock is held when the result is nonzero.
 */
extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
#define atomic_dec_and_raw_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))

extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
					    unsigned long *flags);
#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
479488
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
480489
size_t max_size, unsigned int cpu_mult,
481490
gfp_t gfp, const char *name,

kernel/events/core.c

Lines changed: 17 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -4813,19 +4813,17 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
48134813

48144814
cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
48154815
epc = &cpc->epc;
4816-
4816+
raw_spin_lock_irq(&ctx->lock);
48174817
if (!epc->ctx) {
48184818
atomic_set(&epc->refcount, 1);
48194819
epc->embedded = 1;
4820-
raw_spin_lock_irq(&ctx->lock);
48214820
list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
48224821
epc->ctx = ctx;
4823-
raw_spin_unlock_irq(&ctx->lock);
48244822
} else {
48254823
WARN_ON_ONCE(epc->ctx != ctx);
48264824
atomic_inc(&epc->refcount);
48274825
}
4828-
4826+
raw_spin_unlock_irq(&ctx->lock);
48294827
return epc;
48304828
}
48314829

@@ -4896,33 +4894,30 @@ static void free_epc_rcu(struct rcu_head *head)
48964894

48974895
static void put_pmu_ctx(struct perf_event_pmu_context *epc)
48984896
{
4897+
struct perf_event_context *ctx = epc->ctx;
48994898
unsigned long flags;
49004899

4901-
if (!atomic_dec_and_test(&epc->refcount))
4900+
/*
4901+
* XXX
4902+
*
4903+
* lockdep_assert_held(&ctx->mutex);
4904+
*
4905+
* can't because of the call-site in _free_event()/put_event()
4906+
* which isn't always called under ctx->mutex.
4907+
*/
4908+
if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
49024909
return;
49034910

4904-
if (epc->ctx) {
4905-
struct perf_event_context *ctx = epc->ctx;
4911+
WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
49064912

4907-
/*
4908-
* XXX
4909-
*
4910-
* lockdep_assert_held(&ctx->mutex);
4911-
*
4912-
* can't because of the call-site in _free_event()/put_event()
4913-
* which isn't always called under ctx->mutex.
4914-
*/
4915-
4916-
WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
4917-
raw_spin_lock_irqsave(&ctx->lock, flags);
4918-
list_del_init(&epc->pmu_ctx_entry);
4919-
epc->ctx = NULL;
4920-
raw_spin_unlock_irqrestore(&ctx->lock, flags);
4921-
}
4913+
list_del_init(&epc->pmu_ctx_entry);
4914+
epc->ctx = NULL;
49224915

49234916
WARN_ON_ONCE(!list_empty(&epc->pinned_active));
49244917
WARN_ON_ONCE(!list_empty(&epc->flexible_active));
49254918

4919+
raw_spin_unlock_irqrestore(&ctx->lock, flags);
4920+
49264921
if (epc->embedded)
49274922
return;
49284923

lib/dec_and_lock.c

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,3 +49,34 @@ int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
4949
return 0;
5050
}
5151
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
52+
53+
/*
 * _atomic_dec_and_raw_lock - decrement @atomic; on the 1 -> 0 transition,
 * return 1 with @lock held, otherwise return 0 without touching the lock.
 *
 * raw_spinlock_t counterpart of _atomic_dec_and_lock().
 */
int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
{
	/*
	 * Fast path: if the counter is anything but 1, decrement it without
	 * taking the lock at all — we cannot be the final reference.
	 */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/*
	 * Slow path: the counter was (at least momentarily) 1. Take the lock
	 * and retry the decrement under it so the 1 -> 0 transition is
	 * serialized against the lock holder.
	 */
	raw_spin_lock(lock);
	if (!atomic_dec_and_test(atomic)) {
		/* Someone else raced the count up again; nothing to tear down. */
		raw_spin_unlock(lock);
		return 0;
	}
	/* We performed the final decrement: return with the lock held. */
	return 1;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
67+
68+
/*
 * _atomic_dec_and_raw_lock_irqsave - like _atomic_dec_and_raw_lock(), but the
 * slow path disables interrupts, saving the previous state in *@flags.
 *
 * Returns 1 with @lock held and interrupts disabled if the decrement hit
 * zero; returns 0 otherwise (lock untouched, *@flags unmodified on the
 * fast path).
 */
int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
				     unsigned long *flags)
{
	/* Fast path: counter above 1 — decrement locklessly and bail out. */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/*
	 * Slow path: possible final reference. Serialize the 1 -> 0
	 * transition under the irq-safe lock.
	 */
	raw_spin_lock_irqsave(lock, *flags);
	if (!atomic_dec_and_test(atomic)) {
		/* Raced with a concurrent get; drop the lock and report 0. */
		raw_spin_unlock_irqrestore(lock, *flags);
		return 0;
	}
	/* Final decrement done: caller owns the lock with irqs disabled. */
	return 1;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);

0 commit comments

Comments
 (0)