@@ -68,6 +68,8 @@ static DEFINE_RAW_SPINLOCK(pool_lock);
 static struct obj_pool pool_global;
 static struct obj_pool pool_to_free;
 
+static HLIST_HEAD(pool_boot);
+
 /*
  * Because of the presence of percpu free pools, obj_pool_free will
  * under-count those in the percpu free pools. Similarly, obj_pool_used
@@ -278,6 +280,9 @@ alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *d
 			percpu_pool->obj_free--;
 			goto init_obj;
 		}
+	} else {
+		obj = __alloc_object(&pool_boot);
+		goto init_obj;
 	}
 
 	raw_spin_lock(&pool_lock);
@@ -381,12 +386,14 @@ static void __free_object(struct debug_obj *obj)
 	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
 	struct debug_percpu_free *percpu_pool;
 	int lookahead_count = 0;
-	unsigned long flags;
 	bool work;
 
-	local_irq_save(flags);
-	if (!obj_cache)
-		goto free_to_obj_pool;
+	guard(irqsave)();
+
+	if (unlikely(!obj_cache)) {
+		hlist_add_head(&obj->node, &pool_boot);
+		return;
+	}
 
 	/*
 	 * Try to free it into the percpu pool first.
@@ -395,7 +402,6 @@ static void __free_object(struct debug_obj *obj)
 	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
 		hlist_add_head(&obj->node, &percpu_pool->free_objs);
 		percpu_pool->obj_free++;
-		local_irq_restore(flags);
 		return;
 	}
 
@@ -410,7 +416,6 @@ static void __free_object(struct debug_obj *obj)
 		percpu_pool->obj_free--;
 	}
 
-free_to_obj_pool:
 	raw_spin_lock(&pool_lock);
 	work = (pool_global.cnt > debug_objects_pool_size) && obj_cache &&
 	       (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX);
@@ -455,7 +460,6 @@ static void __free_object(struct debug_obj *obj)
 		}
 	}
 	raw_spin_unlock(&pool_lock);
-	local_irq_restore(flags);
 }
 
 /*
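A note on the locking change above (context, not part of the patch): guard(irqsave)() is one of the kernel's scope-based cleanup guards (see linux/cleanup.h); it disables interrupts where it is declared and restores them automatically on every return path, which is why the explicit local_irq_restore(flags) calls and the free_to_obj_pool label can all be dropped. The mechanism underneath is the compiler's cleanup attribute. A minimal userspace sketch of that pattern, with invented helper names (fake_irq_save/fake_irq_restore are stand-ins, not kernel APIs):

#include <stdio.h>

/* Userspace stand-ins for local_irq_save()/local_irq_restore(). */
static void fake_irq_save(unsigned long *flags)    { *flags = 1; puts("irqs off"); }
static void fake_irq_restore(unsigned long *flags) { (void)flags; puts("irqs restored"); }

/* __attribute__((cleanup(fn))) runs fn(&var) when var leaves scope; this is
 * the primitive the kernel's guard() macros are built on. */
#define scoped_irqsave() \
	unsigned long _irqflags __attribute__((cleanup(fake_irq_restore))) = 0; \
	fake_irq_save(&_irqflags)

static void demo(int early_exit)
{
	scoped_irqsave();	/* analogous to guard(irqsave)(); */

	if (early_exit)
		return;		/* "irqs restored" prints on this path too */
	puts("normal path");
}				/* ...and on this one */

int main(void)
{
	demo(1);
	demo(0);
	return 0;
}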
@@ -1341,10 +1345,9 @@ void __init debug_objects_early_init(void)
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
 		raw_spin_lock_init(&obj_hash[i].lock);
 
+	/* Keep early boot simple and add everything to the boot list */
 	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
-		hlist_add_head(&obj_static_pool[i].node, &pool_global.objects);
-
-	pool_global.cnt = ODEBUG_POOL_SIZE;
+		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
 }
 
 /*
@@ -1372,10 +1375,11 @@ static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache
 	pool_global.cnt = ODEBUG_POOL_SIZE;
 
 	/*
-	 * Replace the statically allocated objects list with the allocated
-	 * objects list.
+	 * Move the allocated objects to the global pool and disconnect the
+	 * boot pool.
 	 */
 	hlist_move_list(&objects, &pool_global.objects);
+	pool_boot.first = NULL;
 
 	/* Replace the active object references */
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
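For orientation (again, not part of the patch): the overall scheme is that before obj_cache exists, every allocation and free is served from the static pool_boot list; once the slab cache is up, debug_objects_replace_static_objects() fills pool_global from kmem_cache allocations and clears pool_boot.first, so the statically allocated boot objects are simply abandoned in place and can never be handed to kmem_cache_free(). A simplified, self-contained sketch of that boot-pool handoff, using plain singly linked lists and invented names rather than the kernel's hlist API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { struct obj *next; };

static struct obj static_objs[4];	/* stand-in for obj_static_pool[]   */
static struct obj *pool_boot;		/* stand-in for the pool_boot list  */
static struct obj *pool_global;		/* stand-in for pool_global.objects */
static bool cache_ready;		/* stand-in for obj_cache != NULL   */

static void early_init(void)
{
	/* Keep early boot simple: everything goes onto the boot list. */
	for (int i = 0; i < 4; i++) {
		static_objs[i].next = pool_boot;
		pool_boot = &static_objs[i];
	}
}

static struct obj *alloc_obj(void)
{
	struct obj **head = cache_ready ? &pool_global : &pool_boot;
	struct obj *o = *head;

	if (o)
		*head = o->next;
	return o;
}

static void replace_static_objects(void)
{
	/* Fill the global pool from the real allocator... */
	for (int i = 0; i < 4; i++) {
		struct obj *o = calloc(1, sizeof(*o));

		o->next = pool_global;
		pool_global = o;
	}
	cache_ready = true;
	/* ...and disconnect the boot pool. The static objects are abandoned
	 * in place, never passed to free() (mirrors "pool_boot.first = NULL"). */
	pool_boot = NULL;
}

int main(void)
{
	early_init();
	struct obj *early = alloc_obj();	/* served from the static boot pool */

	replace_static_objects();
	struct obj *late = alloc_obj();		/* served from the dynamic pool */

	printf("early=%p late=%p boot pool empty: %d\n",
	       (void *)early, (void *)late, pool_boot == NULL);
	return 0;
}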