@@ -33,7 +33,9 @@ static UTIL_ONCE_FLAG mem_pool_ctl_initialized = UTIL_ONCE_FLAG_INIT;
 char CTL_DEFAULT_ENTRIES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};
 char CTL_DEFAULT_VALUES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};
 
-void ctl_init(void) { utils_mutex_init(&ctl_mtx); }
+struct ctl umf_pool_ctl_root;
+
+void ctl_init(void);
 
 static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
                                                umf_ctl_query_source_t source,
@@ -43,9 +45,17 @@ static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
                                                umf_ctl_query_type_t queryType) {
     (void)indexes, (void)source;
     umf_memory_pool_handle_t hPool = (umf_memory_pool_handle_t)ctx;
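+    // First consult the shared pool CTL tree (e.g. the stats nodes
+    // registered below); fall back to the pool's own ext_ctl otherwise.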
+    int ret = ctl_query(&umf_pool_ctl_root, hPool, source, extra_name,
+                        queryType, arg, size);
+    if (ret == -1 && errno == EINVAL) {
+        // Node was not found in umf_pool_ctl_root; try to query the
+        // specific pool directly.
+        hPool->ops.ext_ctl(hPool->pool_priv, source, extra_name, arg, size,
+                           queryType);
+    }
 
-    hPool->ops.ext_ctl(hPool->pool_priv, /*unused*/ 0, extra_name, arg, size,
-                       queryType);
     return 0;
 }
 
@@ -96,9 +106,42 @@ static int CTL_SUBTREE_HANDLER(default)(void *ctx,
     return 0;
 }
 
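+// CTL read handler: reports the number of currently outstanding allocations
+// in a pool (incremented on successful alloc, decremented on free).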
+static int CTL_READ_HANDLER(alloc_count)(void *ctx,
+                                         umf_ctl_query_source_t source,
+                                         void *arg, size_t size,
+                                         umf_ctl_index_utlist_t *indexes,
+                                         const char *extra_name,
+                                         umf_ctl_query_type_t query_type) {
+    /* suppress unused-parameter warnings */
+    (void)source, (void)size, (void)indexes, (void)extra_name, (void)query_type;
+
+    size_t *arg_out = arg;
+    if (ctx == NULL || arg_out == NULL) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    assert(size == sizeof(size_t));
+
+    umf_memory_pool_handle_t pool = (umf_memory_pool_handle_t)ctx;
+    utils_atomic_load_acquire_size_t(&pool->stats.alloc_count, arg_out);
+    return 0;
+}
+
+static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(alloc_count),
+                                                 CTL_NODE_END};
+
 umf_ctl_node_t CTL_NODE(pool)[] = {CTL_LEAF_SUBTREE2(by_handle, by_handle_pool),
                                    CTL_LEAF_SUBTREE(default), CTL_NODE_END};
 
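+// Registers the pool stats subtree in the shared CTL root; expected to run
+// once (see the mem_pool_ctl_initialized once-flag above).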
+void ctl_init(void) {
+    utils_mutex_init(&ctl_mtx);
+    CTL_REGISTER_MODULE(&umf_pool_ctl_root, stats);
+}
+
 static umf_result_t umfDefaultCtlPoolHandle(void *hPool, int operationType,
                                             const char *name, void *arg,
                                             size_t size,
@@ -160,6 +203,8 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
     pool->flags = flags;
     pool->ops = *ops;
     pool->tag = NULL;
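+    // A freshly created pool starts with zeroed statistics.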
+    memset(&pool->stats, 0, sizeof(pool->stats));
 
     if (NULL == pool->ops.ext_ctl) {
         pool->ops.ext_ctl = umfDefaultCtlPoolHandle;
@@ -285,23 +330,51 @@ umf_result_t umfPoolCreate(const umf_memory_pool_ops_t *ops,
 
 void *umfPoolMalloc(umf_memory_pool_handle_t hPool, size_t size) {
     UMF_CHECK((hPool != NULL), NULL);
-    return hPool->ops.malloc(hPool->pool_priv, size);
+    void *ret = hPool->ops.malloc(hPool->pool_priv, size);
+    if (!ret) {
+        return ret;
+    }
+
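+    // Count only successful allocations as live.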
+    utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+
+    return ret;
 }
 
 void *umfPoolAlignedMalloc(umf_memory_pool_handle_t hPool, size_t size,
                            size_t alignment) {
     UMF_CHECK((hPool != NULL), NULL);
-    return hPool->ops.aligned_malloc(hPool->pool_priv, size, alignment);
+    void *ret = hPool->ops.aligned_malloc(hPool->pool_priv, size, alignment);
+    if (!ret) {
+        return ret;
+    }
+
+    utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+    return ret;
 }
 
 void *umfPoolCalloc(umf_memory_pool_handle_t hPool, size_t num, size_t size) {
     UMF_CHECK((hPool != NULL), NULL);
-    return hPool->ops.calloc(hPool->pool_priv, num, size);
+    void *ret = hPool->ops.calloc(hPool->pool_priv, num, size);
+    if (!ret) {
+        return ret;
+    }
+
+    utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+    return ret;
 }
 
 void *umfPoolRealloc(umf_memory_pool_handle_t hPool, void *ptr, size_t size) {
     UMF_CHECK((hPool != NULL), NULL);
-    return hPool->ops.realloc(hPool->pool_priv, ptr, size);
+    void *ret = hPool->ops.realloc(hPool->pool_priv, ptr, size);
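+    // A resize of an existing allocation leaves the live count unchanged;
+    // only the free-like and malloc-like cases below adjust it.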
+    if (size == 0 && ret == NULL && ptr != NULL) { // this is free(ptr)
+        utils_atomic_decrement_size_t(&hPool->stats.alloc_count);
+    } else if (ptr == NULL && ret != NULL) { // this is malloc(size)
+        utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+    }
+    return ret;
 }
 
 size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,
@@ -312,7 +385,15 @@ size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,
 
 umf_result_t umfPoolFree(umf_memory_pool_handle_t hPool, void *ptr) {
     UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT);
-    return hPool->ops.free(hPool->pool_priv, ptr);
+    umf_result_t ret = hPool->ops.free(hPool->pool_priv, ptr);
+
+    if (ret != UMF_RESULT_SUCCESS) {
+        return ret;
+    }
+
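+    // The pool released the pointer; drop it from the live-allocation count.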
+    utils_atomic_decrement_size_t(&hPool->stats.alloc_count);
+    return ret;
 }
 
 umf_result_t umfPoolGetLastAllocationError(umf_memory_pool_handle_t hPool) {
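With this change, the number of live allocations in a pool becomes readable
through the CTL API. A minimal caller sketch, assuming the experimental
umfCtlGet(name, ctx, arg, size) entry point and the
"umf.pool.by_handle.stats.alloc_count" path implied by the nodes registered
above; the header path and exact signature are assumptions, not confirmed by
this diff alone:

    #include <stdio.h>

    #include <umf/experimental/ctl.h> // assumed header for umfCtlGet
    #include <umf/memory_pool.h>

    // Print the live-allocation counter of an existing pool handle.
    // 'pool' is any pool created with umfPoolCreate(); passing it as the
    // ctx argument routes the query through the by_handle_pool handler.
    static void print_alloc_count(umf_memory_pool_handle_t pool) {
        size_t count = 0; // the read handler asserts size == sizeof(size_t)
        umf_result_t res = umfCtlGet("umf.pool.by_handle.stats.alloc_count",
                                     pool, &count, sizeof(count));
        if (res == UMF_RESULT_SUCCESS) {
            printf("live allocations: %zu\n", count);
        }
    }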