@@ -199,6 +199,27 @@ static struct debug_obj *pcpu_alloc(void)
 	}
 }
 
+static void pcpu_free(struct debug_obj *obj)
+{
+	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
+
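+	/* Interrupts are disabled by the callers via guard(irqsave)() */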
+	lockdep_assert_irqs_disabled();
+
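+	/* Free the object into the per CPU pool first */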
+	hlist_add_head(&obj->node, &pcp->objects);
+	pcp->cnt++;
+
+	/* Pool full? */
+	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
+		return;
+
+	/* Remove a batch from the per CPU pool */
+	guard(raw_spinlock)(&pool_lock);
+	/* Try to fit the batch into the pool_global first */
+	if (!pool_move_batch(&pool_global, pcp))
+		pool_move_batch(&pool_to_free, pcp);
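+	/* A batch of ODEBUG_BATCH_SIZE objects left the per CPU pool */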
+	obj_pool_used -= ODEBUG_BATCH_SIZE;
+}
+
 static void free_object_list(struct hlist_head *head)
 {
 	struct hlist_node *tmp;
@@ -375,83 +396,11 @@ static void free_obj_work(struct work_struct *work)
 
 static void __free_object(struct debug_obj *obj)
 {
-	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
-	struct obj_pool *percpu_pool;
-	int lookahead_count = 0;
-	bool work;
-
 	guard(irqsave)();
-
-	if (unlikely(!obj_cache)) {
+	if (likely(obj_cache))
+		pcpu_free(obj);
+	else
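+		/* The cache is not up yet; keep the object in the static boot pool */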
 		hlist_add_head(&obj->node, &pool_boot);
-		return;
-	}
-
-	/*
-	 * Try to free it into the percpu pool first.
-	 */
-	percpu_pool = this_cpu_ptr(&pool_pcpu);
-	if (percpu_pool->cnt < ODEBUG_POOL_PERCPU_SIZE) {
-		hlist_add_head(&obj->node, &percpu_pool->objects);
-		percpu_pool->cnt++;
-		return;
-	}
-
-	/*
-	 * As the percpu pool is full, look ahead and pull out a batch
-	 * of objects from the percpu pool and free them as well.
-	 */
-	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
-		objs[lookahead_count] = __alloc_object(&percpu_pool->objects);
-		if (!objs[lookahead_count])
-			break;
-		percpu_pool->cnt--;
-	}
-
-	raw_spin_lock(&pool_lock);
-	work = (pool_global.cnt > pool_global.max_cnt) && obj_cache &&
-	       (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX);
-	obj_pool_used--;
-
-	if (work) {
-		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
-		hlist_add_head(&obj->node, &pool_to_free.objects);
-		if (lookahead_count) {
-			WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + lookahead_count);
-			obj_pool_used -= lookahead_count;
-			while (lookahead_count) {
-				hlist_add_head(&objs[--lookahead_count]->node,
-					       &pool_to_free.objects);
-			}
-		}
-
-		if ((pool_global.cnt > pool_global.max_cnt) &&
-		    (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX)) {
-			int i;
-
-			/*
-			 * Free one more batch of objects from obj_pool.
-			 */
-			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
-				obj = __alloc_object(&pool_global.objects);
-				hlist_add_head(&obj->node, &pool_to_free.objects);
-				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
-				WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
-			}
-		}
-	} else {
-		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
-		hlist_add_head(&obj->node, &pool_global.objects);
-		if (lookahead_count) {
-			WRITE_ONCE(pool_global.cnt, pool_global.cnt + lookahead_count);
-			obj_pool_used -= lookahead_count;
-			while (lookahead_count) {
-				hlist_add_head(&objs[--lookahead_count]->node,
-					       &pool_global.objects);
-			}
-		}
-	}
-	raw_spin_unlock(&pool_lock);
 }
 
 /*