  * frequency of 10Hz and about 1024 objects for each freeing operation.
  * So it is freeing at most 10k debug objects per second.
  */
-#define ODEBUG_FREE_WORK_MAX	1024
+#define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
 #define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
 
 struct debug_bucket {
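
ODEBUG_FREE_WORK_MAX now counts batches rather than individual objects, so the per-invocation budget described in the comment stays at roughly 1024 objects. A minimal sketch of the rate arithmetic, assuming an illustrative ODEBUG_BATCH_SIZE of 16 (the real value is defined elsewhere in lib/debugobjects.c):

#include <stdio.h>

int main(void)
{
	const unsigned int batch_size = 16;			/* assumed ODEBUG_BATCH_SIZE */
	const unsigned int work_max = 1024 / batch_size;	/* new ODEBUG_FREE_WORK_MAX */
	const unsigned int freq_hz = 10;			/* work runs at most at ~10Hz */

	/* Per-run and per-second free budgets are unchanged by the rewrite */
	printf("objects per run:    %u\n", work_max * batch_size);		/* 1024 */
	printf("objects per second: %u\n", work_max * batch_size * freq_hz);	/* ~10k */
	return 0;
}
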
@@ -158,6 +158,21 @@ static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
 	return true;
 }
 
+static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
+{
+	if (!src->cnt)
+		return false;
+
+	for (int i = 0; src->cnt && i < ODEBUG_BATCH_SIZE; i++) {
+		struct hlist_node *node = src->objects.first;
+
+		WRITE_ONCE(src->cnt, src->cnt - 1);
+		hlist_del(node);
+		hlist_add_head(node, head);
+	}
+	return true;
+}
+
 static struct debug_obj *__alloc_object(struct hlist_head *list)
 {
 	struct debug_obj *obj;
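
The new pool_pop_batch() helper detaches up to ODEBUG_BATCH_SIZE objects from a pool onto a caller-supplied list so they can be freed outside the lock later; it returns false only when the source pool is already empty. A minimal user-space sketch of the same semantics, using a plain singly linked list and hypothetical names rather than the kernel's hlist API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 16	/* stand-in for ODEBUG_BATCH_SIZE */

struct node {
	struct node *next;
};

struct pool {
	struct node *first;
	unsigned int cnt;
};

/* Detach up to BATCH_SIZE nodes from @src onto @head; false if @src was empty. */
static bool pop_batch(struct node **head, struct pool *src)
{
	if (!src->cnt)
		return false;

	for (int i = 0; src->cnt && i < BATCH_SIZE; i++) {
		struct node *n = src->first;

		src->first = n->next;
		src->cnt--;
		/* Head insertion reverses the order, which is irrelevant
		 * when the entries are only going to be freed afterwards. */
		n->next = *head;
		*head = n;
	}
	return true;
}

int main(void)
{
	struct pool src = { .first = NULL, .cnt = 0 };
	struct node *tofree = NULL;

	/* Fill the pool with 20 nodes, then detach one batch of 16 */
	for (int i = 0; i < 20; i++) {
		struct node *n = malloc(sizeof(*n));

		n->next = src.first;
		src.first = n;
		src.cnt++;
	}
	pop_batch(&tofree, &src);
	printf("left in pool: %u\n", src.cnt);	/* 4 */

	/* Free both lists before exit */
	while (tofree) { struct node *n = tofree; tofree = n->next; free(n); }
	while (src.first) { struct node *n = src.first; src.first = n->next; free(n); }
	return 0;
}
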
@@ -343,55 +358,36 @@ static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
 	return obj;
 }
 
-/*
- * workqueue function to free objects.
- *
- * To reduce contention on the global pool_lock, the actual freeing of
- * debug objects will be delayed if the pool_lock is busy.
- */
+/* workqueue function to free objects. */
 static void free_obj_work(struct work_struct *work)
 {
-	struct debug_obj *obj;
-	unsigned long flags;
-	HLIST_HEAD(tofree);
+	bool free = true;
 
 	WRITE_ONCE(obj_freeing, false);
-	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
-		return;
 
-	if (pool_global.cnt >= pool_global.max_cnt)
-		goto free_objs;
-
-	/*
-	 * The objs on the pool list might be allocated before the work is
-	 * run, so recheck if pool list it full or not, if not fill pool
-	 * list from the global free list. As it is likely that a workload
-	 * may be gearing up to use more and more objects, don't free any
-	 * of them until the next round.
-	 */
-	while (pool_to_free.cnt && pool_global.cnt < pool_global.max_cnt) {
-		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
-		hlist_del(&obj->node);
-		hlist_add_head(&obj->node, &pool_global.objects);
-		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
-		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
-	}
-	raw_spin_unlock_irqrestore(&pool_lock, flags);
-	return;
+	if (!pool_count(&pool_to_free))
+		return;
 
-free_objs:
-	/*
-	 * Pool list is already full and there are still objs on the free
-	 * list. Move remaining free objs to a temporary list to free the
-	 * memory outside the pool_lock held region.
-	 */
-	if (pool_to_free.cnt) {
-		hlist_move_list(&pool_to_free.objects, &tofree);
-		WRITE_ONCE(pool_to_free.cnt, 0);
+	for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
+		HLIST_HEAD(tofree);
+
+		/* Acquire and drop the lock for each batch */
+		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
+			if (!pool_to_free.cnt)
+				return;
+
+			/* Refill the global pool if possible */
+			if (pool_move_batch(&pool_global, &pool_to_free)) {
+				/* Don't free as there seems to be demand */
+				free = false;
+			} else if (free) {
+				pool_pop_batch(&tofree, &pool_to_free);
+			} else {
+				return;
+			}
+		}
+		free_object_list(&tofree);
 	}
-	raw_spin_unlock_irqrestore(&pool_lock, flags);
-
-	free_object_list(&tofree);
 }
 
 static void __free_object(struct debug_obj *obj)
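
The rewritten free_obj_work() no longer bails out when pool_lock is contended; instead it takes and drops the lock once per batch, refills the global pool first, and stops freeing for the rest of the run once pool_move_batch() indicates demand. As a rough, hypothetical illustration of what the scoped_guard() scope provides, here is an open-coded equivalent with explicit lock/unlock calls; the guard releases the lock automatically on every exit from the braced scope, including the early returns. The function name is invented for the sketch.

static void free_obj_work_sketch(struct work_struct *work)
{
	bool free = true;

	WRITE_ONCE(obj_freeing, false);

	if (!pool_count(&pool_to_free))
		return;

	for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
		HLIST_HEAD(tofree);
		unsigned long flags;

		/* Take and drop the lock once per batch */
		raw_spin_lock_irqsave(&pool_lock, flags);
		if (!pool_to_free.cnt) {
			raw_spin_unlock_irqrestore(&pool_lock, flags);
			return;
		}
		if (pool_move_batch(&pool_global, &pool_to_free)) {
			/* The global pool took a batch: demand, don't free */
			free = false;
		} else if (free) {
			/* Global pool is full: detach one batch for freeing */
			pool_pop_batch(&tofree, &pool_to_free);
		} else {
			raw_spin_unlock_irqrestore(&pool_lock, flags);
			return;
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);

		/* Free the detached batch outside the lock */
		free_object_list(&tofree);
	}
}
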