  * except the stat counters. This is a percpu structure manipulated only by
  * the local cpu, so the lock protects against being preempted or interrupted
  * by an irq. Fast path operations rely on lockless operations instead.
- * On PREEMPT_RT, the local lock does not actually disable irqs (and thus
- * prevent the lockless operations), so fastpath operations also need to take
- * the lock and are no longer lockless.
+ *
+ * On PREEMPT_RT, the local lock neither disables interrupts nor preemption,
+ * which means the lockless fastpath cannot be used as it might interfere with
+ * an in-progress slow path operation. In this case the local lock is always
+ * taken, but it still utilizes the freelist for the common operations.
  *
  * lockless fastpaths
  *
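Not part of the patch, for orientation only: a condensed sketch of the lockless fastpath the comment above refers to, written against the surrounding SLUB helpers (set_freepointer(), next_tid(), this_cpu_cmpxchg_double()). The function name lockless_free_sketch and its argument list are invented for illustration; error handling and retries are left to the caller. The freelist head and the transaction id (tid) are updated in a single double-word cmpxchg, so no lock is held and a concurrent alloc/free from irq context on the same cpu is detected by the tid having changed.

static __always_inline bool lockless_free_sketch(struct kmem_cache *s,
                                                 void *object, unsigned long tid,
                                                 void *old_freelist)
{
        /* link the object in front of the current cpu freelist */
        set_freepointer(s, object, old_freelist);

        /*
         * Publish the new head only if neither the freelist nor the tid
         * changed underneath us; on failure the caller re-reads and retries.
         */
        return this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
                                       old_freelist, tid,
                                       object, next_tid(tid));
}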
@@ -167,8 +169,9 @@
  * function call even on !PREEMPT_RT, use inline preempt_disable() there.
  */
 #ifndef CONFIG_PREEMPT_RT
-#define slub_get_cpu_ptr(var)           get_cpu_ptr(var)
-#define slub_put_cpu_ptr(var)           put_cpu_ptr(var)
+#define slub_get_cpu_ptr(var)           get_cpu_ptr(var)
+#define slub_put_cpu_ptr(var)           put_cpu_ptr(var)
+#define USE_LOCKLESS_FAST_PATH()        (true)
 #else
 #define slub_get_cpu_ptr(var)           \
 ({                                      \
@@ -180,6 +183,7 @@ do { \
         (void)(var);                   \
         migrate_enable();              \
 } while (0)
+#define USE_LOCKLESS_FAST_PATH()        (false)
 #endif
 
 #ifdef CONFIG_SLUB_DEBUG
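A sketch of the intended usage pattern (assumed; the function name fastpath_or_locked_sketch is not in the patch): because USE_LOCKLESS_FAST_PATH() expands to the compile-time constants (true)/(false), the compiler folds the branch and discards the dead arm, giving #ifdef-like code generation while both arms remain visible to the type checker on every configuration. This is what lets the later hunks replace the #ifdef CONFIG_PREEMPT_RT blocks in do_slab_free() with ordinary if/else.

static inline void fastpath_or_locked_sketch(struct kmem_cache *s)
{
        if (USE_LOCKLESS_FAST_PATH()) {
                /*
                 * !PREEMPT_RT: per-cpu state is updated locklessly,
                 * typically via a tid-protected cmpxchg on the freelist.
                 */
        } else {
                /*
                 * PREEMPT_RT: the same per-cpu state is only touched under
                 * the local lock, which is a sleeping lock on RT.
                 */
                local_lock(&s->cpu_slab->lock);
                /* ... manipulate s->cpu_slab fields ... */
                local_unlock(&s->cpu_slab->lock);
        }
}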
@@ -474,7 +478,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
                                     void *freelist_new, unsigned long counters_new,
                                     const char *n)
 {
-        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+        if (USE_LOCKLESS_FAST_PATH())
                 lockdep_assert_irqs_disabled();
 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
@@ -3288,14 +3292,8 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l
 
         object = c->freelist;
         slab = c->slab;
-        /*
-         * We cannot use the lockless fastpath on PREEMPT_RT because if a
-         * slowpath has taken the local_lock_irqsave(), it is not protected
-         * against a fast path operation in an irq handler. So we need to take
-         * the slow path which uses local_lock. It is still relatively fast if
-         * there is a suitable cpu freelist.
-         */
-        if (IS_ENABLED(CONFIG_PREEMPT_RT) ||
+
+        if (!USE_LOCKLESS_FAST_PATH() ||
             unlikely(!object || !slab || !node_match(slab, node))) {
                 object = __slab_alloc(s, gfpflags, node, addr, c);
         } else {
@@ -3555,6 +3553,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
         void *tail_obj = tail ? : head;
         struct kmem_cache_cpu *c;
         unsigned long tid;
+        void **freelist;
 
 redo:
         /*
@@ -3569,9 +3568,13 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
         /* Same with comment on barrier() in slab_alloc_node() */
         barrier();
 
-        if (likely(slab == c->slab)) {
-#ifndef CONFIG_PREEMPT_RT
-                void **freelist = READ_ONCE(c->freelist);
+        if (unlikely(slab != c->slab)) {
+                __slab_free(s, slab, head, tail_obj, cnt, addr);
+                return;
+        }
+
+        if (USE_LOCKLESS_FAST_PATH()) {
+                freelist = READ_ONCE(c->freelist);
 
                 set_freepointer(s, tail_obj, freelist);
 
@@ -3583,16 +3586,8 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
                         note_cmpxchg_failure("slab_free", s, tid);
                         goto redo;
                 }
-#else /* CONFIG_PREEMPT_RT */
-                /*
-                 * We cannot use the lockless fastpath on PREEMPT_RT because if
-                 * a slowpath has taken the local_lock_irqsave(), it is not
-                 * protected against a fast path operation in an irq handler. So
-                 * we need to take the local_lock. We shouldn't simply defer to
-                 * __slab_free() as that wouldn't use the cpu freelist at all.
-                 */
-                void **freelist;
-
+        } else {
+                /* Update the free list under the local lock */
                 local_lock(&s->cpu_slab->lock);
                 c = this_cpu_ptr(s->cpu_slab);
                 if (unlikely(slab != c->slab)) {
@@ -3607,11 +3602,8 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
                 c->tid = next_tid(tid);
 
                 local_unlock(&s->cpu_slab->lock);
-#endif
-                stat(s, FREE_FASTPATH);
-        } else
-                __slab_free(s, slab, head, tail_obj, cnt, addr);
-
+        }
+        stat(s, FREE_FASTPATH);
 }
 
 static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
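Not part of the patch: a condensed sketch of what the non-lockless branch above amounts to (the slab != c->slab recheck and the re-read of tid/freelist under the lock, which sit in the elided middle of the hunk, are omitted; the function name locked_fastpath_free_sketch is invented). The rationale from the deleted comment still applies: rather than deferring to __slab_free(), the PREEMPT_RT path pushes the object onto the cpu freelist, just serialized by the local lock instead of a cmpxchg, which is why it still counts toward FREE_FASTPATH.

static __always_inline void locked_fastpath_free_sketch(struct kmem_cache *s,
                                                        void *head, void *tail_obj)
{
        struct kmem_cache_cpu *c;

        local_lock(&s->cpu_slab->lock);
        c = this_cpu_ptr(s->cpu_slab);

        /* push [head .. tail_obj] in front of the per-cpu freelist */
        set_freepointer(s, tail_obj, c->freelist);
        c->freelist = head;
        c->tid = next_tid(c->tid);

        local_unlock(&s->cpu_slab->lock);
}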