@@ -2228,7 +2228,7 @@ static void __init init_freelist_randomization(void)
 }
 
 /* Get the next entry on the pre-computed freelist randomized */
-static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab,
+static void *next_freelist_entry(struct kmem_cache *s,
 				unsigned long *pos, void *start,
 				unsigned long page_limit,
 				unsigned long freelist_count)
@@ -2267,13 +2267,12 @@ static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
 	start = fixup_red_left(s, slab_address(slab));
 
 	/* First entry is used as the base of the freelist */
-	cur = next_freelist_entry(s, slab, &pos, start, page_limit,
-				  freelist_count);
+	cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
 	cur = setup_object(s, cur);
 	slab->freelist = cur;
 
 	for (idx = 1; idx < slab->objects; idx++) {
-		next = next_freelist_entry(s, slab, &pos, start, page_limit,
+		next = next_freelist_entry(s, &pos, start, page_limit,
 					   freelist_count);
 		next = setup_object(s, next);
 		set_freepointer(s, cur, next);
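The two hunks above drop the unused `struct slab *slab` argument from `next_freelist_entry()`: the walk over the pre-computed random sequence only needs the cursor position, the slab's start address and the index limits. A minimal userspace sketch of that walk; the names (`toy_cache`, `random_seq`, `object_size`, `next_entry`) are illustrative assumptions, not the kernel's definitions:

```c
#include <stdio.h>

/* Toy model of a cache with a pre-computed randomized index sequence. */
struct toy_cache {
	unsigned int object_size;
	unsigned int *random_seq;	/* permutation of object indices */
};

/*
 * Return the next object for the freelist: take the next index from the
 * pre-computed sequence, wrapping the cursor at freelist_count, and skip
 * indices that do not fit in this particular slab (page_limit).
 */
static void *next_entry(struct toy_cache *s, unsigned long *pos, void *start,
			unsigned long page_limit, unsigned long freelist_count)
{
	unsigned long idx;

	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (idx >= page_limit);

	return (char *)start + idx * s->object_size;
}

int main(void)
{
	unsigned int seq[] = { 3, 0, 2, 1 };
	struct toy_cache cache = { .object_size = 64, .random_seq = seq };
	char slab[4 * 64];
	unsigned long pos = 0;

	/* Link order follows the random sequence, not address order. */
	for (int i = 0; i < 4; i++)
		printf("object %d at offset %td\n", i,
		       (char *)next_entry(&cache, &pos, slab, 4, 4) - slab);
	return 0;
}
```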
@@ -3311,7 +3310,6 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
 		counters = slab->counters;
 
 		new.counters = counters;
-		VM_BUG_ON(!new.frozen);
 
 		new.inuse = slab->objects;
 		new.frozen = freelist != NULL;
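The assertion removed above guarded the assumption that any slab reaching `get_freelist()` was already frozen for the local CPU; with the allocation path below taking slabs straight off the percpu partial list without freezing them first, that invariant no longer holds. For illustration, a simplified userspace model of detaching a whole per-slab freelist in one atomic step; the real code updates `freelist` and `counters` together with a double-word cmpxchg, while this sketch (made-up `toy_slab`/`detach_freelist` names) only swaps the pointer:

```c
#include <stdatomic.h>
#include <stdio.h>

/* Toy object: the first word of a free object points to the next free one. */
struct obj {
	struct obj *next;
};

/* Toy slab: an atomic freelist head plus usage counters. */
struct toy_slab {
	_Atomic(struct obj *) freelist;
	unsigned int objects;
	unsigned int inuse;
};

/*
 * Detach the entire freelist: afterwards every object in the slab is
 * accounted as in use and the caller owns the returned chain.
 */
static struct obj *detach_freelist(struct toy_slab *slab)
{
	struct obj *old = atomic_load(&slab->freelist);

	while (!atomic_compare_exchange_weak(&slab->freelist, &old, NULL))
		;			/* retry if another thread raced with us */

	slab->inuse = slab->objects;	/* not atomic with the swap in this toy */
	return old;
}

int main(void)
{
	struct obj o2 = { .next = NULL };
	struct obj o1 = { .next = &o2 };
	struct toy_slab slab = { .objects = 2, .inuse = 0 };

	atomic_store(&slab.freelist, &o1);

	struct obj *list = detach_freelist(&slab);
	printf("chain starts at o1: %d, inuse=%u\n", list == &o1, slab.inuse);
	return 0;
}
```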
@@ -3483,18 +3481,20 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 		slab = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, slab);
-		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-		stat(s, CPU_PARTIAL_ALLOC);
 
-		if (unlikely(!node_match(slab, node) ||
-			     !pfmemalloc_match(slab, gfpflags))) {
-			slab->next = NULL;
-			__put_partials(s, slab);
-			continue;
+		if (likely(node_match(slab, node) &&
+			   pfmemalloc_match(slab, gfpflags))) {
+			c->slab = slab;
+			freelist = get_freelist(s, slab);
+			VM_BUG_ON(!freelist);
+			stat(s, CPU_PARTIAL_ALLOC);
+			goto load_freelist;
 		}
 
-		freelist = freeze_slab(s, slab);
-		goto retry_load_slab;
+		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+
+		slab->next = NULL;
+		__put_partials(s, slab);
 	}
 #endif
 
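The rewritten block above changes how ___slab_alloc() consumes the percpu partial list: a slab that matches the requested node and pfmemalloc constraints is installed as c->slab while the local lock is still held, its freelist is taken with get_freelist(), and control jumps straight to load_freelist instead of freezing the slab and retrying; only a non-matching slab is unlocked and handed back through __put_partials(). A small userspace model of that decision flow; `toy_slab`, `toy_cpu_cache` and `take_partial()` are made-up names for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy slab with a node id and a pfmemalloc flag, linked on a partial list. */
struct toy_slab {
	struct toy_slab *next;
	int node;
	bool pfmemalloc;
};

struct toy_cpu_cache {
	struct toy_slab *slab;		/* current cpu slab */
	struct toy_slab *partial;	/* percpu partial list */
};

static bool node_matches(struct toy_slab *slab, int node)
{
	return node < 0 || slab->node == node;
}

/*
 * Model of the new percpu-partial path: pop slabs off the partial list and
 * use the first one that matches the request directly as the cpu slab;
 * anything that does not match is handed back (here just counted) instead
 * of being frozen and retried.
 */
static struct toy_slab *take_partial(struct toy_cpu_cache *c, int node,
				     bool allow_pfmemalloc, int *put_back)
{
	while (c->partial) {
		struct toy_slab *slab = c->partial;

		c->partial = slab->next;

		if (node_matches(slab, node) &&
		    (allow_pfmemalloc || !slab->pfmemalloc)) {
			c->slab = slab;	/* use it right away */
			return slab;
		}

		slab->next = NULL;	/* would go back to the node list */
		(*put_back)++;
	}
	return NULL;
}

int main(void)
{
	struct toy_slab s2 = { .next = NULL, .node = 0, .pfmemalloc = false };
	struct toy_slab s1 = { .next = &s2, .node = 1, .pfmemalloc = false };
	struct toy_cpu_cache c = { .slab = NULL, .partial = &s1 };
	int put_back = 0;

	struct toy_slab *got = take_partial(&c, 0, false, &put_back);
	printf("got node %d, put back %d\n", got ? got->node : -1, put_back);
	return 0;
}
```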
@@ -4172,7 +4172,6 @@ static void *__slab_free(struct kmem_cache *s, struct slab *slab,
 	 * then add it.
 	 */
 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
-		remove_full(s, n, slab);
 		add_partial(n, slab, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -4186,9 +4185,6 @@ static void *__slab_free(struct kmem_cache *s, struct slab *slab,
 		 */
 		remove_partial(n, slab);
 		stat(s, FREE_REMOVE_PARTIAL);
-	} else {
-		/* Slab must be on the full list */
-		remove_full(s, n, slab);
 	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
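The last two hunks drop remove_full() from the slow free path: with full-slab tracking gone, a previously full slab (prior == NULL) is simply linked onto the node's partial list on its first free, and an emptying slab only needs to be unlinked from the partial list if it was actually on it. A toy model of that bookkeeping; the names and the simplified conditions (no cpu-partial caching branch, a plain min_partial threshold) are assumptions for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

struct toy_slab {
	struct toy_slab *next;
	unsigned int inuse;	/* allocated objects */
	unsigned int objects;	/* total objects in the slab */
};

struct toy_node {
	struct toy_slab *partial;
	unsigned long nr_partial;
};

static void add_partial(struct toy_node *n, struct toy_slab *slab)
{
	slab->next = n->partial;
	n->partial = slab;
	n->nr_partial++;
}

static void remove_partial(struct toy_node *n, struct toy_slab *slab)
{
	struct toy_slab **p = &n->partial;

	while (*p && *p != slab)
		p = &(*p)->next;
	if (*p) {
		*p = slab->next;
		n->nr_partial--;
	}
}

/*
 * Model of the slow free path bookkeeping after the change: a slab that was
 * full (prior == false) just gains a partial-list entry; there is no separate
 * full list to take it off first.
 */
static void toy_slab_free(struct toy_node *n, struct toy_slab *slab, bool prior,
			  unsigned long min_partial)
{
	slab->inuse--;

	if (!slab->inuse && n->nr_partial >= min_partial) {
		if (prior)		/* was on the partial list */
			remove_partial(n, slab);
		/* the empty slab would be discarded to the page allocator here */
		return;
	}

	if (!prior)	/* first free into a full slab: it becomes partial */
		add_partial(n, slab);
}

int main(void)
{
	struct toy_node node = { .partial = NULL, .nr_partial = 0 };
	struct toy_slab full = { .next = NULL, .inuse = 4, .objects = 4 };

	toy_slab_free(&node, &full, false, 2);
	printf("nr_partial after freeing into a full slab: %lu\n", node.nr_partial);
	return 0;
}
```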