@@ -2228,7 +2228,7 @@ static void __init init_freelist_randomization(void)
 }
 
 /* Get the next entry on the pre-computed freelist randomized */
-static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab,
+static void *next_freelist_entry(struct kmem_cache *s,
                                  unsigned long *pos, void *start,
                                  unsigned long page_limit,
                                  unsigned long freelist_count)
@@ -2267,13 +2267,12 @@ static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
         start = fixup_red_left(s, slab_address(slab));
 
         /* First entry is used as the base of the freelist */
-        cur = next_freelist_entry(s, slab, &pos, start, page_limit,
-                                  freelist_count);
+        cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
         cur = setup_object(s, cur);
         slab->freelist = cur;
 
         for (idx = 1; idx < slab->objects; idx++) {
-                next = next_freelist_entry(s, slab, &pos, start, page_limit,
+                next = next_freelist_entry(s, &pos, start, page_limit,
                                            freelist_count);
                 next = setup_object(s, next);
                 set_freepointer(s, cur, next);
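
Both hunks above drop the slab argument because next_freelist_entry() only ever consults the cache's pre-computed permutation (s->random_seq) and a cursor, never the slab itself. Below is a minimal userspace sketch of that randomized walk; the object count, sizes, the permutation, and the next_entry name are illustrative stand-ins, not the kernel's:

    #include <stdio.h>

    #define NR_OBJECTS  8
    #define OBJECT_SIZE 32

    /* Illustrative pre-computed permutation of object offsets. */
    static const unsigned long random_seq[NR_OBJECTS] = {
        3 * OBJECT_SIZE, 0 * OBJECT_SIZE, 6 * OBJECT_SIZE, 1 * OBJECT_SIZE,
        7 * OBJECT_SIZE, 2 * OBJECT_SIZE, 4 * OBJECT_SIZE, 5 * OBJECT_SIZE,
    };

    /* Shape of next_freelist_entry() after the patch: no slab argument. */
    static void *next_entry(unsigned long *pos, void *start,
                            unsigned long page_limit,
                            unsigned long freelist_count)
    {
        unsigned long idx;

        /* Skip offsets that fall past a shorter-than-usual slab. */
        do {
            idx = random_seq[*pos];
            if (++*pos >= freelist_count)
                *pos = 0;
        } while (idx >= page_limit);

        return (char *)start + idx;
    }

    int main(void)
    {
        char page[NR_OBJECTS * OBJECT_SIZE];
        unsigned long pos = 0;

        for (int i = 0; i < NR_OBJECTS; i++)
            printf("entry %d -> offset %td\n", i,
                   (char *)next_entry(&pos, page, sizeof(page),
                                      NR_OBJECTS) - page);
        return 0;
    }

The page_limit check matters when the slab was allocated at a fallback order and holds fewer objects than random_seq covers: out-of-range offsets are simply skipped.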
@@ -3311,7 +3310,6 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
                 counters = slab->counters;
 
                 new.counters = counters;
-                VM_BUG_ON(!new.frozen);
 
                 new.inuse = slab->objects;
                 new.frozen = freelist != NULL;
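
The VM_BUG_ON(!new.frozen) goes away because get_freelist() is now also called on slabs taken straight from the percpu partial list, which are not frozen at that point; new.frozen is recomputed from whether a freelist was actually obtained. A hedged userspace sketch of the detach idea follows, compressing the kernel's double-word cmpxchg over (freelist, counters) into a single C11 atomic exchange; all types and names here are stand-ins:

    #include <stdatomic.h>
    #include <stdio.h>

    struct object { struct object *next; };

    struct fake_slab {
        _Atomic(struct object *) freelist;
        int frozen;
    };

    /*
     * Detach the whole lockless freelist in one shot and recompute
     * "frozen" from whether any objects were obtained, mirroring
     * new.frozen = freelist != NULL in the hunk above.  The kernel
     * loops on a cmpxchg-double so counters update atomically with
     * the list head; atomic_exchange stands in for that here.
     */
    static struct object *take_freelist(struct fake_slab *slab)
    {
        struct object *list = atomic_exchange(&slab->freelist, NULL);

        slab->frozen = list != NULL;
        return list;
    }

    int main(void)
    {
        struct object a = { NULL }, b = { &a };
        struct fake_slab slab = { &b, 0 };
        int n = 0;

        for (struct object *o = take_freelist(&slab); o; o = o->next)
            n++;
        printf("took %d objects, frozen = %d\n", n, slab.frozen);
        return 0;
    }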
@@ -3483,18 +3481,20 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
                 slab = slub_percpu_partial(c);
                 slub_set_percpu_partial(c, slab);
-                local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-                stat(s, CPU_PARTIAL_ALLOC);
 
-                if (unlikely(!node_match(slab, node) ||
-                             !pfmemalloc_match(slab, gfpflags))) {
-                        slab->next = NULL;
-                        __put_partials(s, slab);
-                        continue;
+                if (likely(node_match(slab, node) &&
+                           pfmemalloc_match(slab, gfpflags))) {
+                        c->slab = slab;
+                        freelist = get_freelist(s, slab);
+                        VM_BUG_ON(!freelist);
+                        stat(s, CPU_PARTIAL_ALLOC);
+                        goto load_freelist;
                 }
 
-                freelist = freeze_slab(s, slab);
-                goto retry_load_slab;
+                local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+
+                slab->next = NULL;
+                __put_partials(s, slab);
         }
 #endif
 
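
This reshaped branch is the core of the change: a percpu partial slab that matches the node and pfmemalloc constraints is installed as c->slab and its freelist is taken directly with get_freelist(), where the old code froze it with freeze_slab() and retried via retry_load_slab; a mismatched slab is handed back through __put_partials() only after the local lock is dropped. A compile-only sketch of that decision, with the kernel predicates reduced to always-true stubs (every name below is a stand-in):

    #include <stddef.h>

    struct slab { struct slab *next; void *freelist; };
    struct kmem_cache_cpu { struct slab *slab; };

    /* Stubs for node_match()/pfmemalloc_match(); the real checks
     * compare the slab's node and PF_MEMALLOC status to the request. */
    static int node_match(struct slab *slab, int node) { return 1; }
    static int pfmemalloc_match(struct slab *slab, unsigned int gfpflags) { return 1; }

    /* Stand-in for get_freelist(): detach whatever the slab holds. */
    static void *take_freelist(struct slab *slab)
    {
        void *list = slab->freelist;

        slab->freelist = NULL;
        return list;
    }

    /*
     * Shape of the patched branch: on a match, adopt the slab as the
     * cpu slab and return its freelist (the kernel then jumps to
     * load_freelist); on a mismatch, return NULL so the caller can
     * drop the local lock and give the slab back via __put_partials().
     */
    static void *try_percpu_partial(struct kmem_cache_cpu *c, struct slab *slab,
                                    int node, unsigned int gfpflags)
    {
        if (node_match(slab, node) && pfmemalloc_match(slab, gfpflags)) {
            c->slab = slab;
            return take_freelist(slab);
        }
        slab->next = NULL;      /* detach before handing it back */
        return NULL;
    }

    int main(void)
    {
        int obj;
        struct slab s = { NULL, &obj };
        struct kmem_cache_cpu c = { NULL };

        return try_percpu_partial(&c, &s, 0, 0) == &obj ? 0 : 1;
    }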
@@ -4172,7 +4172,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
          * then add it.
          */
         if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
-                remove_full(s, n, slab);
                 add_partial(n, slab, DEACTIVATE_TO_TAIL);
                 stat(s, FREE_ADD_PARTIAL);
         }
@@ -4186,9 +4185,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
                  */
                 remove_partial(n, slab);
                 stat(s, FREE_REMOVE_PARTIAL);
-        } else {
-                /* Slab must be on the full list */
-                remove_full(s, n, slab);
         }
 
         spin_unlock_irqrestore(&n->list_lock, flags);