@@ -47,11 +47,18 @@ struct debug_bucket {
 	raw_spinlock_t		lock;
 };
 
+struct pool_stats {
+	unsigned int		cur_used;
+	unsigned int		max_used;
+	unsigned int		min_fill;
+};
+
 struct obj_pool {
 	struct hlist_head	objects;
 	unsigned int		cnt;
 	unsigned int		min_cnt;
 	unsigned int		max_cnt;
+	struct pool_stats	stats;
 } ____cacheline_aligned;
 
 
@@ -66,8 +73,11 @@ static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 static DEFINE_RAW_SPINLOCK(pool_lock);
 
 static struct obj_pool pool_global = {
-	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
-	.max_cnt	= ODEBUG_POOL_SIZE,
+	.min_cnt		= ODEBUG_POOL_MIN_LEVEL,
+	.max_cnt		= ODEBUG_POOL_SIZE,
+	.stats			= {
+		.min_fill	= ODEBUG_POOL_SIZE,
+	},
 };
 
 static struct obj_pool pool_to_free = {
@@ -76,16 +86,6 @@ static struct obj_pool pool_to_free = {
 
 static HLIST_HEAD(pool_boot);
 
-/*
- * Because of the presence of percpu free pools, obj_pool_free will
- * under-count those in the percpu free pools. Similarly, obj_pool_used
- * will over-count those in the percpu free pools. Adjustments will be
- * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
- * can be off.
- */
-static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
-static int			obj_pool_used;
-static int __data_racy		obj_pool_max_used;
 static bool obj_freeing;
 
 static int __data_racy		debug_objects_maxchain __read_mostly;
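
The three counters removed here live on as the fields of pool_global.stats, and the adjustment described in the deleted comment is now performed in debug_stats_show() (last hunk). As a reading aid, a rough mapping, not part of the patch itself:

	obj_pool_used      ->  pool_global.stats.cur_used
	obj_pool_max_used  ->  pool_global.stats.max_used
	obj_pool_min_free  ->  pool_global.stats.min_fill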
@@ -231,6 +231,19 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
 	return obj;
 }
 
+static void pcpu_refill_stats(void)
+{
+	struct pool_stats *stats = &pool_global.stats;
+
+	WRITE_ONCE(stats->cur_used, stats->cur_used + ODEBUG_BATCH_SIZE);
+
+	if (stats->cur_used > stats->max_used)
+		stats->max_used = stats->cur_used;
+
+	if (pool_global.cnt < stats->min_fill)
+		stats->min_fill = pool_global.cnt;
+}
+
 static struct debug_obj *pcpu_alloc(void)
 {
 	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
@@ -250,13 +263,7 @@ static struct debug_obj *pcpu_alloc(void)
 			if (!pool_move_batch(pcp, &pool_global))
 				return NULL;
 		}
-		obj_pool_used += ODEBUG_BATCH_SIZE;
-
-		if (obj_pool_used > obj_pool_max_used)
-			obj_pool_max_used = obj_pool_used;
-
-		if (pool_global.cnt < obj_pool_min_free)
-			obj_pool_min_free = pool_global.cnt;
+		pcpu_refill_stats();
 	}
 }
 
@@ -285,7 +292,7 @@ static void pcpu_free(struct debug_obj *obj)
 	/* Try to fit the batch into the pool_global first */
 	if (!pool_move_batch(&pool_global, pcp))
 		pool_move_batch(&pool_to_free, pcp);
-	obj_pool_used -= ODEBUG_BATCH_SIZE;
+	WRITE_ONCE(pool_global.stats.cur_used, pool_global.stats.cur_used - ODEBUG_BATCH_SIZE);
 }
 
 static void free_object_list(struct hlist_head *head)
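
Both cur_used updates use WRITE_ONCE() because debug_stats_show() in the final hunk samples the field without holding pool_lock via data_race(); the marked accesses document the intentional lockless read for KCSAN. A minimal sketch of that pairing, with helper names invented purely for illustration:

	/* Update side, as in pcpu_alloc()/pcpu_free() above: marked write. */
	static inline void cur_used_account(int delta)		/* hypothetical helper */
	{
		struct pool_stats *stats = &pool_global.stats;

		WRITE_ONCE(stats->cur_used, stats->cur_used + delta);
	}

	/* Stats side: a lockless, best-effort snapshot for the stats file. */
	static inline unsigned int cur_used_snapshot(void)	/* hypothetical helper */
	{
		return data_race(pool_global.stats.cur_used);
	}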
@@ -1074,23 +1081,33 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
 
 static int debug_stats_show(struct seq_file *m, void *v)
 {
-	int cpu, obj_percpu_free = 0;
+	unsigned int cpu, pool_used, pcp_free = 0;
 
+	/*
+	 * pool_global.stats.cur_used is the number of objects currently
+	 * handed out to the per CPU pools. Subtract the number of free
+	 * objects in the per CPU pools to get the number of objects which
+	 * are actually in use. As this is lockless the number is an estimate.
+	 */
 	for_each_possible_cpu(cpu)
-		obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu);
-
-	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
-	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
-	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
-	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
-	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
-	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
-	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
-	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
-	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
-	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
-	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
-	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
+		pcp_free += per_cpu(pool_pcpu.cnt, cpu);
+
+	pool_used = data_race(pool_global.stats.cur_used);
+	pcp_free = min(pool_used, pcp_free);
+	pool_used -= pcp_free;
+
+	seq_printf(m, "max_chain     : %d\n", debug_objects_maxchain);
+	seq_printf(m, "max_checked   : %d\n", debug_objects_maxchecked);
+	seq_printf(m, "warnings      : %d\n", debug_objects_warnings);
+	seq_printf(m, "fixups        : %d\n", debug_objects_fixups);
+	seq_printf(m, "pool_free     : %u\n", pool_count(&pool_global) + pcp_free);
+	seq_printf(m, "pool_pcp_free : %u\n", pcp_free);
+	seq_printf(m, "pool_min_free : %u\n", data_race(pool_global.stats.min_fill));
+	seq_printf(m, "pool_used     : %u\n", pool_used);
+	seq_printf(m, "pool_max_used : %u\n", data_race(pool_global.stats.max_used));
+	seq_printf(m, "on_free_list  : %u\n", pool_count(&pool_to_free));
+	seq_printf(m, "objs_allocated: %d\n", debug_objects_allocated);
+	seq_printf(m, "objs_freed    : %d\n", debug_objects_freed);
 	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(debug_stats);
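
The estimate above works as follows: cur_used counts the objects handed out to the per CPU pools in whole batches, the summed pool_pcpu.cnt values count how many of those are currently sitting free, and min() clamps the subtraction because both numbers are sampled locklessly and may be momentarily inconsistent. A stand-alone sketch of the derivation (the function name is made up for illustration):

	/* Illustration only: how the reported pool_used value is derived. */
	static unsigned int estimate_pool_used(unsigned int cur_used, unsigned int pcp_free)
	{
		/* Clamp so a stale sample cannot make the subtraction underflow. */
		pcp_free = min(cur_used, pcp_free);

		return cur_used - pcp_free;
	}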