
Commit 96a9a04

debugobjects: Move min/max count into pool struct
Having the accounting in the data structure is better in terms of cache lines
and allows more optimizations later on.

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Zhen Lei <[email protected]>
Link: https://lore.kernel.org/all/[email protected]
Parent: 18b8afc · Commit: 96a9a04
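The pattern the patch introduces, as a standalone sketch (a minimal illustration, not the kernel code itself; the 256/1024 thresholds are placeholders, not the kernel's constants): once min_cnt and max_cnt sit next to cnt, a fill-level check dereferences a single pool pointer and touches one cache line, instead of reading a global counter plus a separate limit variable.

	#include <stdbool.h>

	/* Illustrative stand-in for the kernel's struct obj_pool. */
	struct obj_pool {
		unsigned int cnt;     /* current number of pooled objects */
		unsigned int min_cnt; /* refill when cnt drops below this; 0 = never */
		unsigned int max_cnt; /* surplus above this may be freed */
	};

	/* One helper now serves any pool instead of one hard-coded global. */
	static inline bool pool_should_refill(struct obj_pool *pool)
	{
		return pool->cnt < pool->min_cnt;
	}

	/* Per-pool limits via designated initializers, as in the patch. */
	static struct obj_pool pool_global = { .min_cnt = 256, .max_cnt = 1024 };

It also lets each pool carry different limits: in the diff below, the per-CPU pools get only a max_cnt, and pool_to_free uses UINT_MAX because it has no upper bound.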

File tree: 1 file changed, +31 −24 lines

lib/debugobjects.c

@@ -46,18 +46,29 @@ struct debug_bucket {
 struct obj_pool {
 	struct hlist_head	objects;
 	unsigned int		cnt;
+	unsigned int		min_cnt;
+	unsigned int		max_cnt;
 } ____cacheline_aligned;

-static DEFINE_PER_CPU(struct obj_pool, pool_pcpu);
+
+static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
+	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
+};

 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

 static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

 static DEFINE_RAW_SPINLOCK(pool_lock);

-static struct obj_pool pool_global;
-static struct obj_pool pool_to_free;
+static struct obj_pool pool_global = {
+	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
+	.max_cnt	= ODEBUG_POOL_SIZE,
+};
+
+static struct obj_pool pool_to_free = {
+	.max_cnt	= UINT_MAX,
+};

 static HLIST_HEAD(pool_boot);

@@ -79,13 +90,9 @@ static int __data_racy debug_objects_fixups __read_mostly;
 static int __data_racy		debug_objects_warnings __read_mostly;
 static bool __data_racy		debug_objects_enabled __read_mostly
 				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-static int			debug_objects_pool_size __ro_after_init
-				= ODEBUG_POOL_SIZE;
-static int			debug_objects_pool_min_level __ro_after_init
-				= ODEBUG_POOL_MIN_LEVEL;

-static const struct debug_obj_descr	*descr_test __read_mostly;
-static struct kmem_cache		*obj_cache __ro_after_init;
+static const struct debug_obj_descr *descr_test __read_mostly;
+static struct kmem_cache	*obj_cache __ro_after_init;

 /*
  * Track numbers of kmem_cache_alloc()/free() calls done.
@@ -124,14 +131,14 @@ static __always_inline unsigned int pool_count(struct obj_pool *pool)
 	return READ_ONCE(pool->cnt);
 }

-static inline bool pool_global_should_refill(void)
+static __always_inline bool pool_should_refill(struct obj_pool *pool)
 {
-	return READ_ONCE(pool_global.cnt) < debug_objects_pool_min_level;
+	return pool_count(pool) < pool->min_cnt;
 }

-static inline bool pool_global_must_refill(void)
+static __always_inline bool pool_must_refill(struct obj_pool *pool)
 {
-	return READ_ONCE(pool_global.cnt) < (debug_objects_pool_min_level / 2);
+	return pool_count(pool) < pool->min_cnt / 2;
 }

 static void free_object_list(struct hlist_head *head)
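Aside: the two predicates encode a two-level threshold, a routine refill below min_cnt and a critical refill below half of it, which fill_pool() (next hunks) uses to decide whether to allocate even while another CPU already is. A standalone illustration, reusing the sketch struct from above (values are placeholders, not the kernel's):

	#include <stdio.h>
	#include <stdbool.h>

	struct obj_pool { unsigned int cnt, min_cnt, max_cnt; };

	static bool pool_should_refill(struct obj_pool *p) { return p->cnt < p->min_cnt; }
	static bool pool_must_refill(struct obj_pool *p)   { return p->cnt < p->min_cnt / 2; }

	int main(void)
	{
		struct obj_pool pool = { .cnt = 100, .min_cnt = 256, .max_cnt = 1024 };

		/* 100 < 128 (= 256 / 2): critically low, so a refill would
		 * proceed even if another CPU is already allocating. */
		printf("should=%d must=%d\n",
		       pool_should_refill(&pool), pool_must_refill(&pool));
		return 0;
	}

Because the pool is now an argument, the same predicates can later be applied unchanged to the per-CPU pools and pool_to_free; this patch only converts the pool_global call sites.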
@@ -178,7 +185,7 @@ static void fill_pool_from_freelist(void)
 	 * Recheck with the lock held as the worker thread might have
 	 * won the race and freed the global free list already.
 	 */
-	while (pool_to_free.cnt && (pool_global.cnt < debug_objects_pool_min_level)) {
+	while (pool_to_free.cnt && (pool_global.cnt < pool_global.min_cnt)) {
 		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
 		hlist_del(&obj->node);
 		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
@@ -197,11 +204,11 @@ static void fill_pool(void)
 	 * - One other CPU is already allocating
 	 * - the global pool has not reached the critical level yet
 	 */
-	if (!pool_global_must_refill() && atomic_read(&cpus_allocating))
+	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
 		return;

 	atomic_inc(&cpus_allocating);
-	while (pool_global_should_refill()) {
+	while (pool_should_refill(&pool_global)) {
 		struct debug_obj *new, *last = NULL;
 		HLIST_HEAD(head);
 		int cnt;
@@ -337,7 +344,7 @@ static void free_obj_work(struct work_struct *work)
 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
 		return;

-	if (pool_global.cnt >= debug_objects_pool_size)
+	if (pool_global.cnt >= pool_global.max_cnt)
 		goto free_objs;

 	/*
@@ -347,7 +354,7 @@ static void free_obj_work(struct work_struct *work)
 	 * may be gearing up to use more and more objects, don't free any
 	 * of them until the next round.
 	 */
-	while (pool_to_free.cnt && pool_global.cnt < debug_objects_pool_size) {
+	while (pool_to_free.cnt && pool_global.cnt < pool_global.max_cnt) {
 		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
 		hlist_del(&obj->node);
 		hlist_add_head(&obj->node, &pool_global.objects);
@@ -408,7 +415,7 @@ static void __free_object(struct debug_obj *obj)
 	}

 	raw_spin_lock(&pool_lock);
-	work = (pool_global.cnt > debug_objects_pool_size) && obj_cache &&
+	work = (pool_global.cnt > pool_global.max_cnt) && obj_cache &&
 		(pool_to_free.cnt < ODEBUG_FREE_WORK_MAX);
 	obj_pool_used--;

@@ -424,7 +431,7 @@ static void __free_object(struct debug_obj *obj)
 		}
 	}

-	if ((pool_global.cnt > debug_objects_pool_size) &&
+	if ((pool_global.cnt > pool_global.max_cnt) &&
 	    (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX)) {
 		int i;

@@ -629,13 +636,13 @@ static void debug_objects_fill_pool(void)
 	if (unlikely(!obj_cache))
 		return;

-	if (likely(!pool_global_should_refill()))
+	if (likely(!pool_should_refill(&pool_global)))
 		return;

 	/* Try reusing objects from obj_to_free_list */
 	fill_pool_from_freelist();

-	if (likely(!pool_global_should_refill()))
+	if (likely(!pool_should_refill(&pool_global)))
 		return;

 	/*
@@ -1427,8 +1434,8 @@ void __init debug_objects_mem_init(void)
 	 * system.
 	 */
 	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
-	debug_objects_pool_size += extras;
-	debug_objects_pool_min_level += extras;
+	pool_global.max_cnt += extras;
+	pool_global.min_cnt += extras;

 	/* Everything worked. Expose the cache */
 	obj_cache = cache;
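The boot-time adjustment preserved in the last hunk scales both limits by the possible-CPU count. A worked example, assuming ODEBUG_BATCH_SIZE is 16 and the mainline values ODEBUG_POOL_MIN_LEVEL = 256 and ODEBUG_POOL_SIZE = 1024 (treat all numbers as illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int num_possible_cpus = 64;            /* example machine */
		unsigned int extras = num_possible_cpus * 16;   /* 64 * 16 = 1024  */
		unsigned int max_cnt = 1024 + extras;           /* 2048            */
		unsigned int min_cnt = 256 + extras;            /* 1280            */

		printf("max_cnt=%u min_cnt=%u\n", max_cnt, min_cnt);
		return 0;
	}

One side effect visible in the diff: the removed debug_objects_pool_* globals were __ro_after_init, but the limits now live in pool_global, whose cnt member is written at runtime, so that annotation no longer applies to them.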
