@@ -46,18 +46,29 @@ struct debug_bucket {
 struct obj_pool {
	struct hlist_head	objects;
	unsigned int		cnt;
+	unsigned int		min_cnt;
+	unsigned int		max_cnt;
 } ____cacheline_aligned;

-static DEFINE_PER_CPU(struct obj_pool, pool_pcpu);
+
+static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
+	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
+};

 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

 static struct debug_obj	obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

 static DEFINE_RAW_SPINLOCK(pool_lock);

-static struct obj_pool pool_global;
-static struct obj_pool pool_to_free;
+static struct obj_pool pool_global = {
+	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
+	.max_cnt	= ODEBUG_POOL_SIZE,
+};
+
+static struct obj_pool pool_to_free = {
+	.max_cnt	= UINT_MAX,
+};

 static HLIST_HEAD(pool_boot);

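A point worth noting in the initializers above: C designated initializers implicitly zero every member that is not named, so pool_pcpu and pool_to_free end up with min_cnt == 0, while pool_to_free additionally gets max_cnt = UINT_MAX so the staging pool for objects on their way back to the allocator never hits its limit. A minimal standalone sketch of that zero-fill behavior (the hlist_head member is dropped for brevity and the ODEBUG_* constants are not reproduced):

	#include <limits.h>
	#include <stdio.h>

	struct obj_pool {
		unsigned int cnt;	/* current fill level */
		unsigned int min_cnt;	/* refill low-water mark */
		unsigned int max_cnt;	/* free-back high-water mark */
	};

	static struct obj_pool pool_to_free = {
		.max_cnt = UINT_MAX,	/* cnt and min_cnt are implicitly zeroed */
	};

	int main(void)
	{
		printf("cnt=%u min_cnt=%u max_cnt=%u\n",
		       pool_to_free.cnt, pool_to_free.min_cnt, pool_to_free.max_cnt);
		return 0;	/* prints: cnt=0 min_cnt=0 max_cnt=4294967295 */
	}
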
@@ -79,13 +90,9 @@ static int __data_racy debug_objects_fixups __read_mostly;
 static int __data_racy			debug_objects_warnings __read_mostly;
 static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-static int				debug_objects_pool_size __ro_after_init
-					= ODEBUG_POOL_SIZE;
-static int				debug_objects_pool_min_level __ro_after_init
-					= ODEBUG_POOL_MIN_LEVEL;

-static const struct debug_obj_descr	*descr_test __read_mostly;
-static struct kmem_cache		*obj_cache __ro_after_init;
+static const struct debug_obj_descr *descr_test __read_mostly;
+static struct kmem_cache	*obj_cache __ro_after_init;

 /*
  * Track numbers of kmem_cache_alloc()/free() calls done.
@@ -124,14 +131,14 @@ static __always_inline unsigned int pool_count(struct obj_pool *pool)
	return READ_ONCE(pool->cnt);
 }

-static inline bool pool_global_should_refill(void)
+static __always_inline bool pool_should_refill(struct obj_pool *pool)
 {
-	return READ_ONCE(pool_global.cnt) < debug_objects_pool_min_level;
+	return pool_count(pool) < pool->min_cnt;
 }

-static inline bool pool_global_must_refill(void)
+static __always_inline bool pool_must_refill(struct obj_pool *pool)
 {
-	return READ_ONCE(pool_global.cnt) < (debug_objects_pool_min_level / 2);
+	return pool_count(pool) < pool->min_cnt / 2;
 }

 static void free_object_list(struct hlist_head *head)
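The hunk above is the core of the refactor: the refill predicates now take the pool as an argument instead of hard-coding pool_global and the former global threshold variables. A standalone sketch of their semantics (plain C; READ_ONCE is reduced to a plain load since nothing is concurrent here, and the threshold values are illustrative assumptions, not the ODEBUG_* constants):

	#include <stdbool.h>
	#include <stdio.h>

	struct obj_pool { unsigned int cnt, min_cnt, max_cnt; };

	static inline unsigned int pool_count(struct obj_pool *pool)
	{
		return pool->cnt;	/* kernel version: READ_ONCE(pool->cnt) */
	}

	static inline bool pool_should_refill(struct obj_pool *pool)
	{
		return pool_count(pool) < pool->min_cnt;	/* below the low-water mark */
	}

	static inline bool pool_must_refill(struct obj_pool *pool)
	{
		return pool_count(pool) < pool->min_cnt / 2;	/* critically low */
	}

	int main(void)
	{
		struct obj_pool global = { .cnt = 100, .min_cnt = 256, .max_cnt = 1024 };

		/* 100 < 256 -> should refill; 100 < 128 -> must refill */
		printf("should=%d must=%d\n",
		       pool_should_refill(&global), pool_must_refill(&global));
		return 0;
	}

One consequence of the zero-initialized min_cnt: both predicates are always false for pool_pcpu and pool_to_free, so as far as this diff shows, only pool_global can trigger a refill, matching the previous pool_global_*_refill() behavior.
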
@@ -178,7 +185,7 @@ static void fill_pool_from_freelist(void)
	 * Recheck with the lock held as the worker thread might have
	 * won the race and freed the global free list already.
	 */
-	while (pool_to_free.cnt && (pool_global.cnt < debug_objects_pool_min_level)) {
+	while (pool_to_free.cnt && (pool_global.cnt < pool_global.min_cnt)) {
		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
		hlist_del(&obj->node);
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
@@ -197,11 +204,11 @@ static void fill_pool(void)
	 * - One other CPU is already allocating
	 * - the global pool has not reached the critical level yet
	 */
-	if (!pool_global_must_refill() && atomic_read(&cpus_allocating))
+	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
-	while (pool_global_should_refill()) {
+	while (pool_should_refill(&pool_global)) {
		struct debug_obj *new, *last = NULL;
		HLIST_HEAD(head);
		int cnt;
@@ -337,7 +344,7 @@ static void free_obj_work(struct work_struct *work)
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

-	if (pool_global.cnt >= debug_objects_pool_size)
+	if (pool_global.cnt >= pool_global.max_cnt)
		goto free_objs;

	/*
@@ -347,7 +354,7 @@ static void free_obj_work(struct work_struct *work)
	 * may be gearing up to use more and more objects, don't free any
	 * of them until the next round.
	 */
-	while (pool_to_free.cnt && pool_global.cnt < debug_objects_pool_size) {
+	while (pool_to_free.cnt && pool_global.cnt < pool_global.max_cnt) {
		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &pool_global.objects);
@@ -408,7 +415,7 @@ static void __free_object(struct debug_obj *obj)
	}

	raw_spin_lock(&pool_lock);
-	work = (pool_global.cnt > debug_objects_pool_size) && obj_cache &&
+	work = (pool_global.cnt > pool_global.max_cnt) && obj_cache &&
		(pool_to_free.cnt < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

@@ -424,7 +431,7 @@ static void __free_object(struct debug_obj *obj)
		}
	}

-	if ((pool_global.cnt > debug_objects_pool_size) &&
+	if ((pool_global.cnt > pool_global.max_cnt) &&
	    (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX)) {
		int i;

@@ -629,13 +636,13 @@ static void debug_objects_fill_pool(void)
	if (unlikely(!obj_cache))
		return;

-	if (likely(!pool_global_should_refill()))
+	if (likely(!pool_should_refill(&pool_global)))
		return;

	/* Try reusing objects from obj_to_free_list */
	fill_pool_from_freelist();

-	if (likely(!pool_global_should_refill()))
+	if (likely(!pool_should_refill(&pool_global)))
		return;

	/*
@@ -1427,8 +1434,8 @@ void __init debug_objects_mem_init(void)
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
-	debug_objects_pool_size += extras;
-	debug_objects_pool_min_level += extras;
+	pool_global.max_cnt += extras;
+	pool_global.min_cnt += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;
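
The final hunk shows why min_cnt and max_cnt live in writable struct fields rather than in __ro_after_init variables: debug_objects_mem_init() still scales both by one allocation batch per possible CPU. A worked sketch of that arithmetic (all numeric values here are illustrative assumptions, not the real ODEBUG_* constants):

	#include <stdio.h>

	/* Illustrative stand-ins for the kernel constants */
	#define ODEBUG_POOL_SIZE	1024
	#define ODEBUG_POOL_MIN_LEVEL	256
	#define ODEBUG_BATCH_SIZE	16

	int main(void)
	{
		unsigned int possible_cpus = 8;	/* stand-in for num_possible_cpus() */
		unsigned int extras = possible_cpus * ODEBUG_BATCH_SIZE;	/* 128 */

		/* max_cnt: 1024 -> 1152, min_cnt: 256 -> 384 */
		printf("max_cnt=%u min_cnt=%u\n",
		       ODEBUG_POOL_SIZE + extras, ODEBUG_POOL_MIN_LEVEL + extras);
		return 0;
	}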