@@ -141,6 +141,64 @@ static __always_inline bool pool_must_refill(struct obj_pool *pool)
         return pool_count(pool) < pool->min_cnt / 2;
 }
 
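+/*
+ * Move up to one batch of objects from @src to @dst. Bail out when @dst
+ * does not have room for a full batch or @src is empty.
+ */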
+static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
+{
+        if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt || !src->cnt)
+                return false;
+
+        for (int i = 0; i < ODEBUG_BATCH_SIZE && src->cnt; i++) {
+                struct hlist_node *node = src->objects.first;
+
+                WRITE_ONCE(src->cnt, src->cnt - 1);
+                WRITE_ONCE(dst->cnt, dst->cnt + 1);
+
+                hlist_del(node);
+                hlist_add_head(node, &dst->objects);
+        }
+        return true;
+}
+
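+/* Pop the first object off @list or return NULL when the list is empty. */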
+static struct debug_obj *__alloc_object(struct hlist_head *list)
+{
+        struct debug_obj *obj;
+
+        if (unlikely(!list->first))
+                return NULL;
+
+        obj = hlist_entry(list->first, typeof(*obj), node);
+        hlist_del(&obj->node);
+        return obj;
+}
+
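+/*
+ * Allocate an object from the per CPU pool. If the pool is empty, refill
+ * it with a batch from pool_to_free or, failing that, from pool_global
+ * under pool_lock and retry. Interrupts must be disabled by the caller.
+ */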
+static struct debug_obj *pcpu_alloc(void)
+{
+        struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
+
+        lockdep_assert_irqs_disabled();
+
+        for (;;) {
+                struct debug_obj *obj = __alloc_object(&pcp->objects);
+
+                if (likely(obj)) {
+                        pcp->cnt--;
+                        return obj;
+                }
+
+                guard(raw_spinlock)(&pool_lock);
+                if (!pool_move_batch(pcp, &pool_to_free)) {
+                        if (!pool_move_batch(pcp, &pool_global))
+                                return NULL;
+                }
+                obj_pool_used += pcp->cnt;
+
+                if (obj_pool_used > obj_pool_max_used)
+                        obj_pool_max_used = obj_pool_used;
+
+                if (pool_global.cnt < obj_pool_min_free)
+                        obj_pool_min_free = pool_global.cnt;
+        }
+}
+
 static void free_object_list(struct hlist_head *head)
 {
         struct hlist_node *tmp;
@@ -158,7 +216,6 @@ static void free_object_list(struct hlist_head *head)
 static void fill_pool_from_freelist(void)
 {
         static unsigned long state;
-        struct debug_obj *obj;
 
         /*
          * Reuse objs from the global obj_to_free list; they will be
@@ -180,17 +237,11 @@ static void fill_pool_from_freelist(void)
         if (test_bit(0, &state) || test_and_set_bit(0, &state))
                 return;
 
-        guard(raw_spinlock)(&pool_lock);
-        /*
-         * Recheck with the lock held as the worker thread might have
-         * won the race and freed the global free list already.
-         */
-        while (pool_to_free.cnt && (pool_global.cnt < pool_global.min_cnt)) {
-                obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
-                hlist_del(&obj->node);
-                WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
-                hlist_add_head(&obj->node, &pool_global.objects);
-                WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
+        /* Avoid taking the lock when there is no work to do */
+        while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
+                guard(raw_spinlock)(&pool_lock);
+                /* Move a batch if possible */
+                pool_move_batch(&pool_global, &pool_to_free);
         }
         clear_bit(0, &state);
 }
@@ -251,74 +302,17 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
         return NULL;
 }
 
-/*
- * Allocate a new object from the hlist
- */
-static struct debug_obj *__alloc_object(struct hlist_head *list)
-{
-        struct debug_obj *obj = NULL;
-
-        if (list->first) {
-                obj = hlist_entry(list->first, typeof(*obj), node);
-                hlist_del(&obj->node);
-        }
-
-        return obj;
-}
-
-static struct debug_obj *
-alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
+static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
+                                      const struct debug_obj_descr *descr)
 {
-        struct obj_pool *percpu_pool = this_cpu_ptr(&pool_pcpu);
         struct debug_obj *obj;
 
-        if (likely(obj_cache)) {
-                obj = __alloc_object(&percpu_pool->objects);
-                if (obj) {
-                        percpu_pool->cnt--;
-                        goto init_obj;
-                }
-        } else {
+        if (likely(obj_cache))
+                obj = pcpu_alloc();
+        else
                 obj = __alloc_object(&pool_boot);
-                goto init_obj;
-        }
-
-        raw_spin_lock(&pool_lock);
-        obj = __alloc_object(&pool_global.objects);
-        if (obj) {
-                obj_pool_used++;
-                WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
 
-                /*
-                 * Looking ahead, allocate one batch of debug objects and
-                 * put them into the percpu free pool.
-                 */
-                if (likely(obj_cache)) {
-                        int i;
-
-                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
-                                struct debug_obj *obj2;
-
-                                obj2 = __alloc_object(&pool_global.objects);
-                                if (!obj2)
-                                        break;
-                                hlist_add_head(&obj2->node, &percpu_pool->objects);
-                                percpu_pool->cnt++;
-                                obj_pool_used++;
-                                WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
-                        }
-                }
-
-                if (obj_pool_used > obj_pool_max_used)
-                        obj_pool_max_used = obj_pool_used;
-
-                if (pool_global.cnt < obj_pool_min_free)
-                        obj_pool_min_free = pool_global.cnt;
-        }
-        raw_spin_unlock(&pool_lock);
-
-init_obj:
-        if (obj) {
+        if (likely(obj)) {
                 obj->object = addr;
                 obj->descr = descr;
                 obj->state = ODEBUG_STATE_NONE;
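
For reference, the batch-move rule that pool_move_batch() introduces can be exercised in isolation. Below is a minimal, self-contained userspace sketch of the same idea; it is not part of the patch, it stands a plain singly linked list in for the kernel's hlist, and struct pool, struct node and BATCH_SIZE are hypothetical placeholders for obj_pool, hlist and ODEBUG_BATCH_SIZE.

/*
 * Illustrative only: a userspace model of the batch-move rule used by
 * pool_move_batch() above. struct pool/struct node/BATCH_SIZE are
 * hypothetical stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 4

struct node {
        struct node *next;
};

struct pool {
        struct node *head;
        unsigned int cnt;
        unsigned int max_cnt;
};

/* Move up to one batch from @src to @dst; refuse if @dst cannot take a full batch. */
static bool pool_move_batch(struct pool *dst, struct pool *src)
{
        if (dst->cnt + BATCH_SIZE > dst->max_cnt || !src->cnt)
                return false;

        for (int i = 0; i < BATCH_SIZE && src->cnt; i++) {
                struct node *n = src->head;

                /* Unlink from the source head ... */
                src->head = n->next;
                src->cnt--;
                /* ... and push onto the destination head. */
                n->next = dst->head;
                dst->head = n;
                dst->cnt++;
        }
        return true;
}

int main(void)
{
        struct pool src = { .head = NULL, .cnt = 0, .max_cnt = 16 };
        struct pool dst = { .head = NULL, .cnt = 0, .max_cnt = 8 };

        /* Fill the source pool with ten nodes. */
        for (int i = 0; i < 10; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->next = src.head;
                src.head = n;
                src.cnt++;
        }

        /* Drain batches until the destination is full or the source is empty. */
        while (pool_move_batch(&dst, &src))
                ;

        /* Two full batches fit into dst (max 8); the remaining two nodes stay in src. */
        printf("dst=%u src=%u\n", dst.cnt, src.cnt);
        return 0;
}

The guard condition mirrors the kernel function: a transfer is refused outright unless the destination can absorb a whole batch, so the pools exchange objects in batch-sized units.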