
Commit d8c6cd3

Zhen Lei authored and KAGA-KOKO committed
debugobjects: Reduce parallel pool fill attempts
The contention on the global pool_lock can be massive when the global
pool needs to be refilled and many CPUs try to handle this.

Address this by:

 - Splitting the refill from the free list and the allocation path.
   Refilling from the free list has no context constraints on RT, so
   it can be tried outside of the RT specific preemptible() guard.

 - Letting only one CPU handle the free list.

 - Letting only one CPU do allocations unless the pool level is below
   half of the minimum fill level.

Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Zhen Lei <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Link: https://lore.kernel.org/all/[email protected]
Link: https://lore.kernel.org/all/[email protected]
---
 lib/debugobjects.c | 84 +++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 59 insertions(+), 25 deletions(-)
1 parent 661cc28 commit d8c6cd3
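
The free-list refill is serialized by a single state bit: every CPU first does a cheap test_bit() read, and only the one that then wins test_and_set_bit() takes pool_lock and drains obj_to_free. Below is a minimal user-space sketch of that test-then-test-and-set gating using C11 atomics rather than the kernel's bit operations; the names try_become_refiller()/refill_done() are illustrative and not part of the patch.

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool refilling;   /* mirrors the single "state" bit */

    /* Returns true if this caller won the right to refill from the free list. */
    static bool try_become_refiller(void)
    {
            /*
             * Cheap relaxed load first: contenders that see the bit set back
             * off without a read-modify-write, so the cache line is not
             * pulled exclusive just to learn the work is already taken.
             */
            if (atomic_load_explicit(&refilling, memory_order_relaxed))
                    return false;

            /* Only one caller wins the exchange; everyone else bails out. */
            return !atomic_exchange(&refilling, true);
    }

    static void refill_done(void)
    {
            atomic_store(&refilling, false);
    }

The plain load in front of the exchange keeps losing CPUs from bouncing the cache line, which is what the in-diff comment about pool_lock contention refers to.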


lib/debugobjects.c

Lines changed: 59 additions & 25 deletions
@@ -138,14 +138,10 @@ static void free_object_list(struct hlist_head *head)
 	debug_objects_freed += cnt;
 }
 
-static void fill_pool(void)
+static void fill_pool_from_freelist(void)
 {
-	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
+	static unsigned long state;
 	struct debug_obj *obj;
-	unsigned long flags;
-
-	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
-		return;
 
 	/*
 	 * Reuse objs from the global obj_to_free list; they will be
@@ -154,47 +150,73 @@ static void fill_pool(void)
 	 * obj_nr_tofree is checked locklessly; the READ_ONCE() pairs with
 	 * the WRITE_ONCE() in pool_lock critical sections.
 	 */
-	if (READ_ONCE(obj_nr_tofree)) {
-		raw_spin_lock_irqsave(&pool_lock, flags);
-		/*
-		 * Recheck with the lock held as the worker thread might have
-		 * won the race and freed the global free list already.
-		 */
-		while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
-			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
-			hlist_del(&obj->node);
-			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
-			hlist_add_head(&obj->node, &obj_pool);
-			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
-		}
-		raw_spin_unlock_irqrestore(&pool_lock, flags);
+	if (!READ_ONCE(obj_nr_tofree))
+		return;
+
+	/*
+	 * Prevent the context from being scheduled or interrupted after
+	 * setting the state flag;
+	 */
+	guard(irqsave)();
+
+	/*
+	 * Avoid lock contention on &pool_lock and avoid making the cache
+	 * line exclusive by testing the bit before attempting to set it.
+	 */
+	if (test_bit(0, &state) || test_and_set_bit(0, &state))
+		return;
+
+	guard(raw_spinlock)(&pool_lock);
+	/*
+	 * Recheck with the lock held as the worker thread might have
+	 * won the race and freed the global free list already.
+	 */
+	while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
+		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+		hlist_del(&obj->node);
+		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
+		hlist_add_head(&obj->node, &obj_pool);
+		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 	}
+	clear_bit(0, &state);
+}
 
-	if (unlikely(!obj_cache))
+static void fill_pool(void)
+{
+	static atomic_t cpus_allocating;
+
+	/*
+	 * Avoid allocation and lock contention when:
+	 *   - One other CPU is already allocating
+	 *   - the global pool has not reached the critical level yet
+	 */
+	if (READ_ONCE(obj_pool_free) > (debug_objects_pool_min_level / 2) &&
+	    atomic_read(&cpus_allocating))
 		return;
 
+	atomic_inc(&cpus_allocating);
 	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
 		struct debug_obj *new, *last = NULL;
 		HLIST_HEAD(head);
 		int cnt;
 
 		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
-			new = kmem_cache_zalloc(obj_cache, gfp);
+			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
 			if (!new)
 				break;
 			hlist_add_head(&new->node, &head);
 			if (!last)
 				last = new;
 		}
 		if (!cnt)
-			return;
+			break;
 
-		raw_spin_lock_irqsave(&pool_lock, flags);
+		guard(raw_spinlock_irqsave)(&pool_lock);
 		hlist_splice_init(&head, &last->node, &obj_pool);
 		debug_objects_allocated += cnt;
 		WRITE_ONCE(obj_pool_free, obj_pool_free + cnt);
-		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
+	atomic_dec(&cpus_allocating);
 }
 
 /*
@@ -597,6 +619,18 @@ static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket
 
 static void debug_objects_fill_pool(void)
 {
+	if (unlikely(!obj_cache))
+		return;
+
+	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
+		return;
+
+	/* Try reusing objects from obj_to_free_list */
+	fill_pool_from_freelist();
+
+	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
+		return;
+
 	/*
 	 * On RT enabled kernels the pool refill must happen in preemptible
 	 * context -- for !RT kernels we rely on the fact that spinlock_t and
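
The allocation path in fill_pool() is gated with a counter instead of a bit: a CPU skips allocating while another CPU is already at it, unless the pool has dropped below half of the minimum fill level, at which point additional CPUs are allowed to join in. A hedged user-space sketch of that policy follows; POOL_MIN_LEVEL, should_allocate() and alloc_done() are assumed names for illustration, not the kernel's identifiers.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define POOL_MIN_LEVEL 256      /* assumed threshold, not the kernel's constant */

    static atomic_int pool_free;    /* objects currently sitting in the pool */
    static atomic_int allocators;   /* callers currently allocating fresh objects */

    /* Decide whether this caller should allocate; pairs with alloc_done(). */
    static bool should_allocate(void)
    {
            /*
             * Back off while someone else is allocating and the pool is still
             * above the critical (half-minimum) level. Below that level every
             * caller is allowed to allocate in parallel.
             */
            if (atomic_load(&pool_free) > POOL_MIN_LEVEL / 2 &&
                atomic_load(&allocators))
                    return false;

            atomic_fetch_add(&allocators, 1);
            return true;
    }

    static void alloc_done(void)
    {
            atomic_fetch_sub(&allocators, 1);
    }

As in the patch, the check and the increment are not a single atomic step, so a few callers can slip through together; the gate is a contention heuristic, not a strict mutex.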
