@@ -2052,9 +2052,9 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 	}
 }
 
-static inline void mark_failed_objexts_alloc(struct slab *slab)
+static inline bool mark_failed_objexts_alloc(struct slab *slab)
 {
-	slab->obj_exts = OBJEXTS_ALLOC_FAIL;
+	return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0;
 }
 
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
@@ -2076,7 +2076,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
-static inline void mark_failed_objexts_alloc(struct slab *slab) {}
+static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 			struct slabobj_ext *vec, unsigned int objects) {}
 
@@ -2124,8 +2124,14 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 				   slab_nid(slab));
 	}
 	if (!vec) {
-		/* Mark vectors which failed to allocate */
-		mark_failed_objexts_alloc(slab);
+		/*
+		 * Try to mark vectors which failed to allocate.
+		 * If this operation fails, there may be a racing process
+		 * that has already completed the allocation.
+		 */
+		if (!mark_failed_objexts_alloc(slab) &&
+		    slab_obj_exts(slab))
+			return 0;
 
 		return -ENOMEM;
 	}
@@ -2136,6 +2142,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 #ifdef CONFIG_MEMCG
 	new_exts |= MEMCG_DATA_OBJEXTS;
 #endif
+retry:
 	old_exts = READ_ONCE(slab->obj_exts);
 	handle_failed_objexts_alloc(old_exts, vec, objects);
 	if (new_slab) {
@@ -2145,8 +2152,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 		 * be simply assigned.
 		 */
 		slab->obj_exts = new_exts;
-	} else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
-		   cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+	} else if (old_exts & ~OBJEXTS_FLAGS_MASK) {
 		/*
 		 * If the slab is already in use, somebody can allocate and
 		 * assign slabobj_exts in parallel. In this case the existing
@@ -2158,6 +2164,9 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 		else
 			kfree(vec);
 		return 0;
+	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+		/* Retry if a racing thread changed slab->obj_exts from under us. */
+		goto retry;
 	}
 
 	if (allow_spin)
@@ -3419,7 +3428,6 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
 
 	if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
 		/* Unlucky, discard newly allocated slab */
-		slab->frozen = 1;
 		defer_deactivate_slab(slab, NULL);
 		return NULL;
 	}
@@ -6468,9 +6476,12 @@ static void free_deferred_objects(struct irq_work *work)
 		struct slab *slab = container_of(pos, struct slab, llnode);
 
 #ifdef CONFIG_SLUB_TINY
-		discard_slab(slab->slab_cache, slab);
+		free_slab(slab->slab_cache, slab);
 #else
-		deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+		if (slab->frozen)
+			deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+		else
+			free_slab(slab->slab_cache, slab);
 #endif
 	}
 }
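For readers outside the kernel tree, below is a minimal user-space sketch of the mark-failed/retry pattern this patch introduces, using C11 atomics in place of the kernel's cmpxchg(). The struct and function names (fake_slab, mark_failed, install_vector) are illustrative only and not part of the patch.

/*
 * Sketch: mark a failed allocation only if nobody has installed a vector
 * yet, and install a new vector with a retry loop on concurrent updates.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define OBJEXTS_ALLOC_FAIL 0x1UL

struct fake_slab {
	_Atomic unsigned long obj_exts;
};

/* Mark the slab only if obj_exts is still 0 (no racer won yet). */
static bool mark_failed(struct fake_slab *slab)
{
	unsigned long expected = 0;

	return atomic_compare_exchange_strong(&slab->obj_exts, &expected,
					      OBJEXTS_ALLOC_FAIL);
}

/* Install a new vector, retrying if a racer changed obj_exts meanwhile. */
static void install_vector(struct fake_slab *slab, unsigned long new_exts)
{
	unsigned long old_exts;

	do {
		old_exts = atomic_load(&slab->obj_exts);
		/* merge the old state here, as handle_failed_objexts_alloc() does */
	} while (!atomic_compare_exchange_strong(&slab->obj_exts, &old_exts,
						 new_exts));
}

int main(void)
{
	struct fake_slab s = { 0 };

	if (!mark_failed(&s))
		printf("someone else already set obj_exts\n");
	install_vector(&s, 0x1000);
	printf("obj_exts = %#lx\n", atomic_load(&s.obj_exts));
	return 0;
}

The key design point mirrored from the patch: a failed mark is not an error by itself, because losing the cmpxchg usually means another thread already installed a valid vector, in which case the caller can return success instead of -ENOMEM.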