@@ -125,6 +125,20 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
 };
 
+static void free_object_list(struct hlist_head *head)
+{
+	struct hlist_node *tmp;
+	struct debug_obj *obj;
+	int cnt = 0;
+
+	hlist_for_each_entry_safe(obj, tmp, head, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+		cnt++;
+	}
+	debug_objects_freed += cnt;
+}
+
 static void fill_pool(void)
 {
 	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
@@ -286,7 +300,6 @@ alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *d
  */
 static void free_obj_work(struct work_struct *work)
 {
-	struct hlist_node *tmp;
 	struct debug_obj *obj;
 	unsigned long flags;
 	HLIST_HEAD(tofree);
@@ -323,15 +336,11 @@ static void free_obj_work(struct work_struct *work)
 	 */
 	if (obj_nr_tofree) {
 		hlist_move_list(&obj_to_free, &tofree);
-		debug_objects_freed += obj_nr_tofree;
 		WRITE_ONCE(obj_nr_tofree, 0);
 	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
 
-	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
-		hlist_del(&obj->node);
-		kmem_cache_free(obj_cache, obj);
-	}
+	free_object_list(&tofree);
 }
 
 static void __free_object(struct debug_obj *obj)
@@ -1334,6 +1343,7 @@ static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache
 	}
 	return true;
 free:
+	/* Can't use free_object_list() as the cache is not populated yet */
 	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
 		hlist_del(&obj->node);
 		kmem_cache_free(cache, obj);
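
For readers unfamiliar with the pattern the patch consolidates: free_object_list() walks the list with a "safe" iterator that caches the next node before the current one is unlinked and freed, then bumps the freed-object counter once per object, which is what free_obj_work() previously did inline. Below is a minimal user-space sketch of that teardown pattern, not the kernel code; the node type, free_node_list() and the objects_freed counter are hypothetical stand-ins for struct debug_obj, the hlist helpers and debug_objects_freed.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	struct node *next;
};

static unsigned long objects_freed;	/* stand-in for debug_objects_freed */

static void free_node_list(struct node **head)
{
	struct node *obj = *head;
	struct node *tmp;
	int cnt = 0;

	while (obj) {
		tmp = obj->next;	/* remember the successor before freeing */
		free(obj);
		obj = tmp;
		cnt++;
	}
	*head = NULL;
	objects_freed += cnt;	/* account for everything torn down */
}

int main(void)
{
	struct node *head = NULL;

	/* Build a short list, then tear it down in one call. */
	for (int i = 0; i < 5; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->value = i;
		n->next = head;
		head = n;
	}

	free_node_list(&head);
	printf("freed %lu objects\n", objects_freed);
	return 0;
}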