/* Default CTL entry/value tables (zero-initialized); dimensions come from
 * UMF_DEFAULT_SIZE / UMF_DEFAULT_LEN, declared elsewhere in the project. */
char CTL_DEFAULT_ENTRIES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};
char CTL_DEFAULT_VALUES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};

/* Root of the pool-level CTL namespace; modules (e.g. "stats") are
 * registered into it by ctl_init() below. */
static struct ctl umf_pool_ctl_root;

/* Forward declaration: the definition lives below the CTL node tables it
 * registers. */
static void ctl_init(void);
/* CTL handler for "pool.by_handle.<ptr>...": resolves a query against a
 * specific pool handle passed in `ctx`.  First consults the shared
 * umf_pool_ctl_root tree; if the node is unknown there, forwards the query
 * to the pool's own ext_ctl operation. */
static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
                                               umf_ctl_query_source_t source,
                                               void *arg, size_t size,
                                               umf_ctl_index_utlist_t *indexes,
                                               const char *extra_name,
                                               umf_ctl_query_type_t queryType) {
    /* suppress unused-parameter warnings (note: source IS used below; the
     * cast is retained for parity with the other handlers) */
    (void)indexes, (void)source;
    umf_memory_pool_handle_t hPool = (umf_memory_pool_handle_t)ctx;

    /* Try the shared pool CTL tree first (e.g. "stats.alloc_count"). */
    int ret = ctl_query(&umf_pool_ctl_root, hPool, source, extra_name,
                        queryType, arg, size);
    if (ret == -1 &&
        errno == EINVAL) { // node was not found in pool_ctl_root, try to
                           // query the specific pool directly
        /* NOTE(review): ext_ctl's return value is discarded and this handler
         * always reports 0 (success), so failures of the pool-specific query
         * are silently swallowed — confirm this is intentional. */
        hPool->ops.ext_ctl(hPool->pool_priv, source, extra_name, arg, size,
                           queryType);
    }

    return 0;
}
5159
@@ -96,9 +104,38 @@ static int CTL_SUBTREE_HANDLER(default)(void *ctx,
96104 return 0 ;
97105}
98106
107+ static int CTL_READ_HANDLER (alloc_count )(void * ctx ,
108+ umf_ctl_query_source_t source ,
109+ void * arg , size_t size ,
110+ umf_ctl_index_utlist_t * indexes ,
111+ const char * extra_name ,
112+ umf_ctl_query_type_t query_type ) {
113+ /* suppress unused-parameter errors */
114+ (void )source , (void )size , (void )indexes , (void )extra_name , (void )query_type ;
115+
116+ size_t * arg_out = arg ;
117+ if (ctx == NULL || arg_out == NULL ) {
118+ return UMF_RESULT_ERROR_INVALID_ARGUMENT ;
119+ }
120+
121+ assert (size == sizeof (size_t ));
122+
123+ umf_memory_pool_handle_t pool = (umf_memory_pool_handle_t )ctx ;
124+ utils_atomic_load_acquire_size_t (& pool -> stats .alloc_count , arg_out );
125+ return UMF_RESULT_SUCCESS ;
126+ }
127+
/* Pool-level "stats" CTL subtree: currently exposes only the read-only
 * alloc_count leaf. */
static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(alloc_count),
                                                 CTL_NODE_END};

/* Public "pool" CTL namespace: "by_handle" routes queries to a specific
 * pool handle, "default" is handled by the default-subtree handler above. */
umf_ctl_node_t CTL_NODE(pool)[] = {CTL_LEAF_SUBTREE2(by_handle, by_handle_pool),
                                   CTL_LEAF_SUBTREE(default), CTL_NODE_END};

/* One-time CTL initialization (run via mem_pool_ctl_initialized): creates
 * the global CTL mutex and registers the "stats" module under the pool CTL
 * root so by_handle_pool queries can resolve it. */
static void ctl_init(void) {
    utils_mutex_init(&ctl_mtx);
    CTL_REGISTER_MODULE(&umf_pool_ctl_root, stats);
}
138+
102139static umf_result_t umfDefaultCtlPoolHandle (void * hPool , int operationType ,
103140 const char * name , void * arg ,
104141 size_t size ,
@@ -160,6 +197,7 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
160197 pool -> flags = flags ;
161198 pool -> ops = * ops ;
162199 pool -> tag = NULL ;
200+ memset (& pool -> stats , 0 , sizeof (pool -> stats ));
163201
164202 if (NULL == pool -> ops .ext_ctl ) {
165203 pool -> ops .ext_ctl = umfDefaultCtlPoolHandle ;
@@ -285,23 +323,47 @@ umf_result_t umfPoolCreate(const umf_memory_pool_ops_t *ops,
285323
286324void * umfPoolMalloc (umf_memory_pool_handle_t hPool , size_t size ) {
287325 UMF_CHECK ((hPool != NULL ), NULL );
288- return hPool -> ops .malloc (hPool -> pool_priv , size );
326+ void * ret = hPool -> ops .malloc (hPool -> pool_priv , size );
327+ if (!ret ) {
328+ return NULL ;
329+ }
330+
331+ utils_atomic_increment_size_t (& hPool -> stats .alloc_count );
332+ return ret ;
289333}
290334
291335void * umfPoolAlignedMalloc (umf_memory_pool_handle_t hPool , size_t size ,
292336 size_t alignment ) {
293337 UMF_CHECK ((hPool != NULL ), NULL );
294- return hPool -> ops .aligned_malloc (hPool -> pool_priv , size , alignment );
338+ void * ret = hPool -> ops .aligned_malloc (hPool -> pool_priv , size , alignment );
339+ if (!ret ) {
340+ return NULL ;
341+ }
342+
343+ utils_atomic_increment_size_t (& hPool -> stats .alloc_count );
344+ return ret ;
295345}
296346
297347void * umfPoolCalloc (umf_memory_pool_handle_t hPool , size_t num , size_t size ) {
298348 UMF_CHECK ((hPool != NULL ), NULL );
299- return hPool -> ops .calloc (hPool -> pool_priv , num , size );
349+ void * ret = hPool -> ops .calloc (hPool -> pool_priv , num , size );
350+ if (!ret ) {
351+ return NULL ;
352+ }
353+
354+ utils_atomic_increment_size_t (& hPool -> stats .alloc_count );
355+ return ret ;
300356}
301357
302358void * umfPoolRealloc (umf_memory_pool_handle_t hPool , void * ptr , size_t size ) {
303359 UMF_CHECK ((hPool != NULL ), NULL );
304- return hPool -> ops .realloc (hPool -> pool_priv , ptr , size );
360+ void * ret = hPool -> ops .realloc (hPool -> pool_priv , ptr , size );
361+ if (size == 0 && ret == NULL && ptr != NULL ) { // this is free(ptr)
362+ utils_atomic_decrement_size_t (& hPool -> stats .alloc_count );
363+ } else if (ptr == NULL && ret != NULL ) { // this is malloc(size)
364+ utils_atomic_increment_size_t (& hPool -> stats .alloc_count );
365+ }
366+ return ret ;
305367}
306368
307369size_t umfPoolMallocUsableSize (umf_memory_pool_handle_t hPool ,
@@ -312,7 +374,15 @@ size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,
312374
313375umf_result_t umfPoolFree (umf_memory_pool_handle_t hPool , void * ptr ) {
314376 UMF_CHECK ((hPool != NULL ), UMF_RESULT_ERROR_INVALID_ARGUMENT );
315- return hPool -> ops .free (hPool -> pool_priv , ptr );
377+ umf_result_t ret = hPool -> ops .free (hPool -> pool_priv , ptr );
378+
379+ if (ret != UMF_RESULT_SUCCESS ) {
380+ return ret ;
381+ }
382+ if (ptr != NULL ) {
383+ utils_atomic_decrement_size_t (& hPool -> stats .alloc_count );
384+ }
385+ return ret ;
316386}
317387
318388umf_result_t umfPoolGetLastAllocationError (umf_memory_pool_handle_t hPool ) {
0 commit comments