From fb6259af36de7debfb638f8d63529c59ad765e96 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Plewa?=
Date: Mon, 9 Jun 2025 12:38:23 +0200
Subject: [PATCH] add pool alloc counter ctl
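
Add a per-pool allocation counter and expose it through CTL as a
read-only "stats.alloc_count" leaf. The counter is incremented on every
successful umfPoolMalloc, umfPoolAlignedMalloc and umfPoolCalloc,
decremented on a successful umfPoolFree, and adjusted by umfPoolRealloc
when the call degenerates into malloc(size) (ptr == NULL) or free(ptr)
(size == 0). Queries for nodes not found in the pool ctl tree still
fall through to the pool-specific ext_ctl handler.

Reading the counter is a single umfCtlGet call; the sketch below is a
minimal example mirroring the new test in poolFixtures.hpp (`pool` is a
placeholder for any valid umf_memory_pool_handle_t):

    size_t alloc_count = 0;
    void *ptr = umfPoolMalloc(pool, 4096);            // counter: 0 -> 1
    umf_result_t ret =
        umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool,
                  &alloc_count, sizeof(alloc_count)); // alloc_count == 1
    umfPoolFree(pool, ptr);                           // counter: 1 -> 0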
---
 src/memory_pool.c          |  86 ++++++++++++++++++++++++---
 src/memory_pool_internal.h |   6 ++
 test/common/pool.hpp       |   7 ++-
 test/poolFixtures.hpp      | 117 +++++++++++++++++++++++++++++++++++++
 4 files changed, 207 insertions(+), 9 deletions(-)

diff --git a/src/memory_pool.c b/src/memory_pool.c
index c98b677b56..c6b6e9087c 100644
--- a/src/memory_pool.c
+++ b/src/memory_pool.c
@@ -33,7 +33,9 @@ static UTIL_ONCE_FLAG mem_pool_ctl_initialized = UTIL_ONCE_FLAG_INIT;
 char CTL_DEFAULT_ENTRIES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};
 char CTL_DEFAULT_VALUES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};
 
-void ctl_init(void) { utils_mutex_init(&ctl_mtx); }
+static struct ctl umf_pool_ctl_root;
+
+static void ctl_init(void);
 
 static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
                                                umf_ctl_query_source_t source,
@@ -43,9 +45,15 @@ static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
                                                umf_ctl_query_type_t queryType) {
     (void)indexes, (void)source;
     umf_memory_pool_handle_t hPool = (umf_memory_pool_handle_t)ctx;
+    int ret = ctl_query(&umf_pool_ctl_root, hPool, source, extra_name,
+                        queryType, arg, size);
+    if (ret == -1 &&
+        errno == EINVAL) { // node was not found in pool_ctl_root, try to
+                           // query the specific pool directly
+        hPool->ops.ext_ctl(hPool->pool_priv, source, extra_name, arg, size,
+                           queryType);
+    }
 
-    hPool->ops.ext_ctl(hPool->pool_priv, /*unused*/ 0, extra_name, arg, size,
-                       queryType);
     return 0;
 }
 
@@ -96,9 +104,38 @@ static int CTL_SUBTREE_HANDLER(default)(void *ctx,
     return 0;
 }
 
+static int CTL_READ_HANDLER(alloc_count)(void *ctx,
+                                         umf_ctl_query_source_t source,
+                                         void *arg, size_t size,
+                                         umf_ctl_index_utlist_t *indexes,
+                                         const char *extra_name,
+                                         umf_ctl_query_type_t query_type) {
+    /* suppress unused-parameter errors */
+    (void)source, (void)size, (void)indexes, (void)extra_name,
+        (void)query_type;
+
+    size_t *arg_out = arg;
+    if (ctx == NULL || arg_out == NULL) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    assert(size == sizeof(size_t));
+
+    umf_memory_pool_handle_t pool = (umf_memory_pool_handle_t)ctx;
+    utils_atomic_load_acquire_size_t(&pool->stats.alloc_count, arg_out);
+    return UMF_RESULT_SUCCESS;
+}
+
+static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(alloc_count),
+                                                 CTL_NODE_END};
+
 umf_ctl_node_t CTL_NODE(pool)[] = {
     CTL_LEAF_SUBTREE2(by_handle, by_handle_pool), CTL_LEAF_SUBTREE(default),
     CTL_NODE_END};
 
+static void ctl_init(void) {
+    utils_mutex_init(&ctl_mtx);
+    CTL_REGISTER_MODULE(&umf_pool_ctl_root, stats);
+}
+
 static umf_result_t umfDefaultCtlPoolHandle(void *hPool, int operationType,
                                             const char *name, void *arg,
                                             size_t size,
@@ -160,6 +197,7 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
     pool->flags = flags;
     pool->ops = *ops;
     pool->tag = NULL;
+    memset(&pool->stats, 0, sizeof(pool->stats));
 
     if (NULL == pool->ops.ext_ctl) {
         pool->ops.ext_ctl = umfDefaultCtlPoolHandle;
@@ -285,23 +323,47 @@ umf_result_t umfPoolCreate(const umf_memory_pool_ops_t *ops,
 
 void *umfPoolMalloc(umf_memory_pool_handle_t hPool, size_t size) {
     UMF_CHECK((hPool != NULL), NULL);
-    return hPool->ops.malloc(hPool->pool_priv, size);
+    void *ret = hPool->ops.malloc(hPool->pool_priv, size);
+    if (!ret) {
+        return NULL;
+    }
+
+    utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+    return ret;
 }
 
 void *umfPoolAlignedMalloc(umf_memory_pool_handle_t hPool, size_t size,
                            size_t alignment) {
     UMF_CHECK((hPool != NULL), NULL);
-    return hPool->ops.aligned_malloc(hPool->pool_priv, size, alignment);
+    void *ret = hPool->ops.aligned_malloc(hPool->pool_priv, size, alignment);
+    if (!ret) {
+        return NULL;
+    }
+
+    utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+    return ret;
 }
 
 void *umfPoolCalloc(umf_memory_pool_handle_t hPool, size_t num, size_t size) {
     UMF_CHECK((hPool != NULL), NULL);
-    return hPool->ops.calloc(hPool->pool_priv, num, size);
+    void *ret = hPool->ops.calloc(hPool->pool_priv, num, size);
+    if (!ret) {
+        return NULL;
+    }
+
+    utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+    return ret;
 }
 
 void *umfPoolRealloc(umf_memory_pool_handle_t hPool, void *ptr, size_t size) {
     UMF_CHECK((hPool != NULL), NULL);
-    return hPool->ops.realloc(hPool->pool_priv, ptr, size);
+    void *ret = hPool->ops.realloc(hPool->pool_priv, ptr, size);
+    if (size == 0 && ret == NULL && ptr != NULL) { // this is free(ptr)
+        utils_atomic_decrement_size_t(&hPool->stats.alloc_count);
+    } else if (ptr == NULL && ret != NULL) { // this is malloc(size)
+        utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+    }
+    return ret;
 }
 
 size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,
@@ -312,7 +374,15 @@ size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,
 
 umf_result_t umfPoolFree(umf_memory_pool_handle_t hPool, void *ptr) {
     UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT);
-    return hPool->ops.free(hPool->pool_priv, ptr);
+    umf_result_t ret = hPool->ops.free(hPool->pool_priv, ptr);
+
+    if (ret != UMF_RESULT_SUCCESS) {
+        return ret;
+    }
+    if (ptr != NULL) {
+        utils_atomic_decrement_size_t(&hPool->stats.alloc_count);
+    }
+    return ret;
 }
 
 umf_result_t umfPoolGetLastAllocationError(umf_memory_pool_handle_t hPool) {
diff --git a/src/memory_pool_internal.h b/src/memory_pool_internal.h
index 4e3c316966..ad05464dd9 100644
--- a/src/memory_pool_internal.h
+++ b/src/memory_pool_internal.h
@@ -24,6 +24,10 @@ extern "C" {
 #include "base_alloc.h"
 #include "utils_concurrency.h"
 
+typedef struct umf_pool_stats {
+    size_t alloc_count;
+} umf_pool_stats_t;
+
 typedef struct umf_memory_pool_t {
     void *pool_priv;
     umf_pool_create_flags_t flags;
@@ -33,6 +37,8 @@ typedef struct umf_memory_pool_t {
     utils_mutex_t lock;
 
     void *tag;
+    // Memory pool statistics
+    umf_pool_stats_t stats;
 
     // ops should be the last due to possible change size in the future
     umf_memory_pool_ops_t ops;
diff --git a/test/common/pool.hpp b/test/common/pool.hpp
index a8f10ace1a..d9873810a1 100644
--- a/test/common/pool.hpp
+++ b/test/common/pool.hpp
@@ -83,7 +83,11 @@ bool isCallocSupported(umf_memory_pool_handle_t hPool) {
     return supported;
 }
 
-bool isAlignedAllocSupported(umf_memory_pool_handle_t hPool) {
+bool isAlignedAllocSupported([[maybe_unused]] umf_memory_pool_handle_t hPool) {
+#ifdef _WIN32
+    // On Windows, aligned allocation is not supported
+    return false;
+#else
     static constexpr size_t allocSize = 8;
     static constexpr size_t alignment = 8;
     auto *ptr = umfPoolAlignedMalloc(hPool, allocSize, alignment);
@@ -97,6 +101,7 @@ bool isAlignedAllocSupported(umf_memory_pool_handle_t hPool) {
     } else {
         throw std::runtime_error("AlignedMalloc failed with unexpected error");
     }
+#endif
 }
 
 typedef struct pool_base_t {
diff --git a/test/poolFixtures.hpp b/test/poolFixtures.hpp
index 870596c91f..23f519eccd 100644
--- a/test/poolFixtures.hpp
+++ b/test/poolFixtures.hpp
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include <list>
 #include
 #include
 #include
@@ -687,4 +688,120 @@ TEST_P(umfPoolTest, pool_from_ptr_half_size_success) {
 #endif /* !_WIN32 */
 }
 
+TEST_P(umfPoolTest, ctl_stat_alloc_count) {
+    umf_memory_pool_handle_t pool_get = pool.get();
+    const size_t size = 4096;
+    const size_t max_allocs = 10;
+    std::list<void *> ptrs;
+    size_t alloc_count = 0;
+    auto ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
+                         &alloc_count, sizeof(alloc_count));
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    ASSERT_EQ(alloc_count, 0ull);
+    for (size_t i = 1; i <= max_allocs; i++) {
+        void *ptr = umfPoolMalloc(pool_get, size);
+        ASSERT_NE(ptr, nullptr);
+        ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
+                        &alloc_count, sizeof(alloc_count));
+        ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+        ASSERT_EQ(alloc_count, i);
+        ptrs.push_back(ptr);
+    }
+
+    for (auto &ptr : ptrs) {
+        umf_result_t umf_result = umfPoolFree(pool_get, ptr);
+        ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    }
+
+    ptrs.clear();
+    ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
+                    &alloc_count, sizeof(alloc_count));
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    ASSERT_EQ(alloc_count, 0ull);
+
+    if (umf_test::isReallocSupported(pool_get)) {
+        for (size_t i = 1; i <= max_allocs; i++) {
+            void *ptr;
+            if (i % 2 == 0) {
+                ptr = umfPoolMalloc(pool_get, size);
+            } else {
+                ptr = umfPoolRealloc(pool_get, nullptr, size);
+            }
+            ASSERT_NE(ptr, nullptr);
+            ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
+                            &alloc_count, sizeof(alloc_count));
+            ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+            ASSERT_EQ(alloc_count, i);
+            ptrs.push_back(ptr);
+        }
+        for (auto &ptr : ptrs) {
+            ptr = umfPoolRealloc(pool_get, ptr, size * 2);
+            ASSERT_NE(ptr, nullptr);
+        }
+        ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
+                        &alloc_count, sizeof(alloc_count));
+        ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+        ASSERT_EQ(alloc_count, max_allocs);
+        size_t allocs = ptrs.size();
+        for (auto &ptr : ptrs) {
+            if (allocs-- % 2 == 0) {
+                ptr = umfPoolRealloc(pool_get, ptr, 0);
+                ASSERT_EQ(ptr, nullptr);
+            } else {
+                ret = umfPoolFree(pool_get, ptr);
+                ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+            }
+        }
+        ptrs.clear();
+        ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
+                        &alloc_count, sizeof(alloc_count));
+        ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+        ASSERT_EQ(alloc_count, 0ull);
+    }
+
+    if (umf_test::isCallocSupported(pool_get)) {
+        for (size_t i = 1; i <= max_allocs; i++) {
+            void *ptr = umfPoolCalloc(pool_get, 1, size);
+            ASSERT_NE(ptr, nullptr);
+            ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
+                            &alloc_count, sizeof(alloc_count));
+            ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+            ASSERT_EQ(alloc_count, i);
+            ptrs.push_back(ptr);
+        }
+
+        for (auto &ptr : ptrs) {
+            umf_result_t umf_result = umfPoolFree(pool_get, ptr);
+            ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+        }
+        ptrs.clear();
+        ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
+                        &alloc_count, sizeof(alloc_count));
+        ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+        ASSERT_EQ(alloc_count, 0ull);
+    }
+
+    if (umf_test::isAlignedAllocSupported(pool_get)) {
+        for (size_t i = 1; i <= max_allocs; i++) {
+            void *ptr = umfPoolAlignedMalloc(pool_get, size, 4096);
+            ASSERT_NE(ptr, nullptr);
+            ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
+                            &alloc_count, sizeof(alloc_count));
+            ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+            ASSERT_EQ(alloc_count, i);
+            ptrs.push_back(ptr);
+        }
+
+        for (auto &ptr : ptrs) {
+            umf_result_t umf_result = umfPoolFree(pool_get, ptr);
+            ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+        }
+
+        ptrs.clear();
+        ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
+                        &alloc_count, sizeof(alloc_count));
+        ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+        ASSERT_EQ(alloc_count, 0ull);
+    }
+}
 #endif /* UMF_TEST_POOL_FIXTURES_HPP */