
Commit 83382af

Alexei Starovoitov authored and tehcaster committed
slab: Make slub local_(try)lock more precise for LOCKDEP
kmalloc_nolock() can be called from any context: ___slab_alloc() can acquire a local_trylock_t (which is an rt_spin_lock on PREEMPT_RT) and then attempt to acquire a different local_trylock_t while in the same task context. The calling sequence might look like:

  kmalloc() -> tracepoint -> bpf -> kmalloc_nolock()

or, more precisely:

  __lock_acquire+0x12ad/0x2590
  lock_acquire+0x133/0x2d0
  rt_spin_lock+0x6f/0x250
  ___slab_alloc+0xb7/0xec0
  kmalloc_nolock_noprof+0x15a/0x430
  my_debug_callback+0x20e/0x390 [testmod]
  ___slab_alloc+0x256/0xec0
  __kmalloc_cache_noprof+0xd6/0x3b0

Make LOCKDEP understand that local_trylock_t-s protect different kmem_caches: add a lock_class_key to each kmem_cache and use that key in its local_trylock_t. This stack trace is possible on both PREEMPT_RT and !PREEMPT_RT, but teach lockdep about it only on PREEMPT_RT, since on !PREEMPT_RT ___slab_alloc() uses local_trylock_irqsave() when lockdep is enabled. Note that this patch applies the logic to local_lock_t, while the next one converts it to local_trylock_t; both are mapped to rt_spin_lock on PREEMPT_RT.

Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
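For background on the false positive: lockdep classifies locks by the call site that initialized them, so every per-CPU lock set up by the single local_lock_init() in init_kmem_cache_cpus() shares one lockdep class across all kmem_caches, and nesting two different caches' locks looks like recursive locking of a single class. A minimal sketch of the difference per-object keys make, using plain spinlocks (hypothetical demo code, not from this commit):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static spinlock_t locks[2];
static struct lock_class_key keys[2];	/* static keys need no registration */

static void demo_init(bool finegrain)
{
	int i;

	for (i = 0; i < 2; i++) {
		/* One init call site: by default both locks land in the
		 * same lockdep class. */
		spin_lock_init(&locks[i]);
		if (finegrain)
			lockdep_set_class(&locks[i], &keys[i]);
	}
}

static void demo_nest(void)
{
	spin_lock(&locks[0]);
	/* With per-lock classes, lockdep records the dependency
	 * keys[0] -> keys[1] and stays quiet; with the shared default
	 * class, this nesting is reported as possible recursive
	 * locking, as in the trace above. */
	spin_lock(&locks[1]);
	spin_unlock(&locks[1]);
	spin_unlock(&locks[0]);
}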
1 parent: d7242af

2 files changed: +21 −0 lines

mm/slab.h
1 addition, 0 deletions

@@ -234,6 +234,7 @@ struct kmem_cache_order_objects {
 struct kmem_cache {
 #ifndef CONFIG_SLUB_TINY
 	struct kmem_cache_cpu __percpu *cpu_slab;
+	struct lock_class_key lock_key;
 #endif
 	struct slub_percpu_sheaves __percpu *cpu_sheaves;
 	/* Used for retrieving partial slabs, etc. */

mm/slub.c
20 additions, 0 deletions

@@ -3586,12 +3586,29 @@ static inline void note_cmpxchg_failure(const char *n,

 static void init_kmem_cache_cpus(struct kmem_cache *s)
 {
+#ifdef CONFIG_PREEMPT_RT
+	/*
+	 * Register lockdep key for non-boot kmem caches to avoid
+	 * WARN_ON_ONCE(static_obj(key)) in lockdep_register_key()
+	 */
+	bool finegrain_lockdep = !init_section_contains(s, 1);
+#else
+	/*
+	 * Don't bother with different lockdep classes for each
+	 * kmem_cache, since we only use local_trylock_irqsave().
+	 */
+	bool finegrain_lockdep = false;
+#endif
 	int cpu;
 	struct kmem_cache_cpu *c;

+	if (finegrain_lockdep)
+		lockdep_register_key(&s->lock_key);
 	for_each_possible_cpu(cpu) {
 		c = per_cpu_ptr(s->cpu_slab, cpu);
 		local_lock_init(&c->lock);
+		if (finegrain_lockdep)
+			lockdep_set_class(&c->lock, &s->lock_key);
 		c->tid = init_tid(cpu);
 	}
 }

@@ -7210,6 +7227,9 @@ void __kmem_cache_release(struct kmem_cache *s)
 	if (s->cpu_sheaves)
 		pcs_destroy(s);
 #ifndef CONFIG_SLUB_TINY
+#ifdef CONFIG_PREEMPT_RT
+	lockdep_unregister_key(&s->lock_key);
+#endif
 	free_percpu(s->cpu_slab);
 #endif
 	free_kmem_cache_nodes(s);
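The register/unregister pairing above follows lockdep's rule for dynamically allocated keys: a lock_class_key embedded in heap memory must be registered before use and unregistered before the memory is freed, while a key in static storage must not be registered at all (lockdep_register_key() warns via static_obj(), which is why the statically placed boot caches are skipped with the init_section_contains() check). A generic sketch of that lifecycle, with a hypothetical my_obj type that is not part of this commit:

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	spinlock_t lock;
	struct lock_class_key key;	/* lives in the heap with the object */
};

static struct my_obj *my_obj_new(void)
{
	struct my_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	spin_lock_init(&o->lock);
	/* A dynamic key must be registered first; a static key passed
	 * here would trip WARN_ON_ONCE(static_obj(key)). */
	lockdep_register_key(&o->key);
	lockdep_set_class(&o->lock, &o->key);
	return o;
}

static void my_obj_free(struct my_obj *o)
{
	/* Unregister before freeing, or lockdep keeps a stale pointer. */
	lockdep_unregister_key(&o->key);
	kfree(o);
}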
