diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c index b32d2b317f..e5339376e5 100644 --- a/src/pool/pool_disjoint.c +++ b/src/pool/pool_disjoint.c @@ -33,6 +33,8 @@ static char *DEFAULT_NAME = "disjoint"; struct ctl disjoint_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; +// Disable name ctl for 1.0 release +#if 0 static umf_result_t CTL_READ_HANDLER(name)(void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, @@ -70,12 +72,88 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx, return UMF_RESULT_SUCCESS; } +#endif +static umf_result_t +CTL_READ_HANDLER(used_memory)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + + if (arg == NULL || size != sizeof(size_t)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + size_t used_memory = 0; + + // Calculate used memory across all buckets + for (size_t i = 0; i < pool->buckets_num; i++) { + bucket_t *bucket = pool->buckets[i]; + utils_mutex_lock(&bucket->bucket_lock); + + // Count allocated chunks in available slabs + slab_list_item_t *it; + for (it = bucket->available_slabs; it != NULL; it = it->next) { + slab_t *slab = it->val; + used_memory += slab->num_chunks_allocated * bucket->size; + } + + // Count allocated chunks in unavailable slabs (all chunks allocated) + for (it = bucket->unavailable_slabs; it != NULL; it = it->next) { + slab_t *slab = it->val; + used_memory += slab->num_chunks_allocated * bucket->size; + } + + utils_mutex_unlock(&bucket->bucket_lock); + } + + *(size_t *)arg = used_memory; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_READ_HANDLER(reserved_memory)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + + if (arg == NULL || size != sizeof(size_t)) { + return 
UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + size_t reserved_memory = 0; + + // Calculate reserved memory across all buckets + for (size_t i = 0; i < pool->buckets_num; i++) { + bucket_t *bucket = pool->buckets[i]; + utils_mutex_lock(&bucket->bucket_lock); + + // Count all slabs (both available and unavailable) + slab_list_item_t *it; + for (it = bucket->available_slabs; it != NULL; it = it->next) { + slab_t *slab = it->val; + reserved_memory += slab->slab_size; + } + + for (it = bucket->unavailable_slabs; it != NULL; it = it->next) { + slab_t *slab = it->val; + reserved_memory += slab->slab_size; + } + + utils_mutex_unlock(&bucket->bucket_lock); + } + + *(size_t *)arg = reserved_memory; + return UMF_RESULT_SUCCESS; +} -static const umf_ctl_node_t CTL_NODE(disjoint)[] = {CTL_LEAF_RW(name), - CTL_NODE_END}; +static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(used_memory), + CTL_LEAF_RO(reserved_memory), + CTL_NODE_END}; static void initialize_disjoint_ctl(void) { - CTL_REGISTER_MODULE(&disjoint_ctl_root, disjoint); + CTL_REGISTER_MODULE(&disjoint_ctl_root, stats); + // CTL_REGISTER_MODULE(&disjoint_ctl_root, name); } umf_result_t disjoint_pool_ctl(void *hPool, diff --git a/test/ctl/ctl_api.cpp b/test/ctl/ctl_api.cpp index 5137878c36..55120961b1 100644 --- a/test/ctl/ctl_api.cpp +++ b/test/ctl/ctl_api.cpp @@ -288,7 +288,7 @@ TEST_F(CtlTest, ctlDefaultPoolOverwrite) { ASSERT_EQ(std::string(output), values.back()); } -TEST_F(CtlTest, ctlNameValidation) { +TEST_F(CtlTest, DISABLED_ctlNameValidation) { std::string name = "umf.pool.default.disjoint.name"; std::string value = "new_disjoint_pool_name"; umf_disjoint_pool_params_handle_t params = NULL; @@ -311,7 +311,7 @@ TEST_F(CtlTest, ctlNameValidation) { p.freeResources(); } -TEST_F(CtlTest, ctlSizeValidation) { +TEST_F(CtlTest, DISABLED_ctlSizeValidation) { std::string name = "umf.pool.default.disjoint.name"; std::string value = "1234567890"; umf_disjoint_pool_params_handle_t params = NULL; @@ -340,7 +340,7 @@ TEST_F(CtlTest,
ctlSizeValidation) { p.freeResources(); } -TEST_F(CtlTest, ctlExecInvalidSize) { +TEST_F(CtlTest, DISABLED_ctlExecInvalidSize) { std::string name = "umf.pool.default.disjoint.name"; ASSERT_EQ(umfCtlSet(name.c_str(), (void *)"test_value", 0), UMF_RESULT_ERROR_INVALID_ARGUMENT); diff --git a/test/pools/disjoint_pool_ctl.cpp b/test/pools/disjoint_pool_ctl.cpp index a38af623db..5de142d323 100644 --- a/test/pools/disjoint_pool_ctl.cpp +++ b/test/pools/disjoint_pool_ctl.cpp @@ -10,7 +10,10 @@ #include <umf/pools/pool_disjoint.h> #include <umf/providers/provider_os_memory.h> +#include <vector> + #include "base.hpp" +#include "utils_assert.h" #include "utils_log.h" using umf_test::test; @@ -86,7 +89,7 @@ class ProviderWrapper { void *m_params; }; -TEST_F(test, disjointCtlName) { +TEST_F(test, DISABLED_disjointCtlName) { umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; if (UMF_RESULT_ERROR_NOT_SUPPORTED == umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { GTEST_SKIP() << "OS memory provider is not supported!"; } @@ -119,7 +122,7 @@ TEST_F(test, disjointCtlName) { ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); } -TEST_F(test, disjointCtlChangeNameTwice) { +TEST_F(test, DISABLED_disjointCtlChangeNameTwice) { umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; if (UMF_RESULT_ERROR_NOT_SUPPORTED == umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { @@ -152,3 +155,269 @@ TEST_F(test, disjointCtlChangeNameTwice) { ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); } + +TEST_F(test, disjointCtlUsedMemory) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not
supported!"; + } + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + + const size_t slab_min_size = 64 * 1024; + umfDisjointPoolParamsSetMinBucketSize(params, slab_min_size); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + + // Initially, used memory should be 0 + size_t used_memory = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", + &used_memory, sizeof(used_memory), + poolWrapper.get())); + ASSERT_EQ(used_memory, 0ull); + + // Allocate some memory + void *ptr1 = umfPoolMalloc(poolWrapper.get(), 1024ull); + ASSERT_NE(ptr1, nullptr); + + // Check that used memory increased + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", + &used_memory, sizeof(used_memory), + poolWrapper.get())); + ASSERT_GE(used_memory, 1024ull); + + // Allocate more memory + void *ptr2 = umfPoolMalloc(poolWrapper.get(), 2048ull); + ASSERT_NE(ptr2, nullptr); + + size_t used_memory2 = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", + &used_memory2, sizeof(used_memory2), + poolWrapper.get())); + ASSERT_GE(used_memory2, used_memory + 2048ull); + + // Free memory + ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr1)); + ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr2)); + + // Check that used memory is equal to 0 + size_t used_memory3 = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", + &used_memory3, sizeof(used_memory3), + poolWrapper.get())); + ASSERT_EQ(used_memory3, 0ull); + + // Allocate again at least slab_min_size + void *ptr3 = umfPoolMalloc(poolWrapper.get(), slab_min_size); + ASSERT_NE(ptr3, nullptr); + + // Check that used memory increased + size_t used_memory4 = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", + &used_memory4, sizeof(used_memory4), + poolWrapper.get())); + ASSERT_EQ(used_memory4, slab_min_size); + + // Clean up + 
ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +} + +TEST_F(test, disjointCtlReservedMemory) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + const size_t slab_min_size = 64 * 1024; + + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params)); + + // Set minimum slab size + ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size)); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + + // Initially, reserved memory should be 0 + size_t reserved_memory = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", + &reserved_memory, sizeof(reserved_memory), + poolWrapper.get())); + ASSERT_EQ(reserved_memory, 0ull); + + // Allocate some memory + void *ptr1 = umfPoolMalloc(poolWrapper.get(), 1024ull); + ASSERT_NE(ptr1, nullptr); + + // Check that reserved memory increased (should be at least slab_min_size) + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", + &reserved_memory, sizeof(reserved_memory), + poolWrapper.get())); + ASSERT_GE(reserved_memory, slab_min_size); + + void *ptr2 = umfPoolMalloc(poolWrapper.get(), 1024ull); + ASSERT_NE(ptr2, nullptr); + + size_t reserved_memory2 = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", + &reserved_memory2, sizeof(reserved_memory2), + poolWrapper.get())); + size_t used_memory = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", + &used_memory, sizeof(used_memory), +
poolWrapper.get())); + + ASSERT_GE(reserved_memory2, slab_min_size); + + // Free memory - reserved memory should stay the same + ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr1)); + ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr2)); + + size_t reserved_memory3 = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", + &reserved_memory3, sizeof(reserved_memory3), + poolWrapper.get())); + ASSERT_EQ(reserved_memory3, slab_min_size); + + // Clean up + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +} + +TEST_F(test, disjointCtlMemoryMetricsConsistency) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params)); + + // Set minimum slab size + size_t slab_min_size = 64 * 1024; + ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, 4)); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + + const size_t n_allocations = 10; // Number of allocations + + // Allocate memory + std::vector<void *> ptrs; + for (size_t i = 0; i < n_allocations; i++) { + void *ptr = umfPoolMalloc(poolWrapper.get(), slab_min_size); + ASSERT_NE(ptr, nullptr); + ptrs.push_back(ptr); + } + + // Get memory metrics + size_t used_memory = 0; + size_t reserved_memory = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", + &used_memory, sizeof(used_memory), + poolWrapper.get())); +
ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", + &reserved_memory, sizeof(reserved_memory), + poolWrapper.get())); + + // Used memory should be at least the total allocated + ASSERT_GE(used_memory, n_allocations * slab_min_size); + + // Reserved memory should cover at least the pool capacity of slabs + ASSERT_GE(reserved_memory, 4 * slab_min_size); + + // Free all memory + for (void *ptr : ptrs) { + ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr)); + } + + // Check metrics after free + size_t used_memory_after = 0; + size_t reserved_memory_after = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", + &used_memory_after, sizeof(used_memory_after), + poolWrapper.get())); + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", + &reserved_memory_after, + sizeof(reserved_memory_after), poolWrapper.get())); + + // Used memory should be 0 after freeing + ASSERT_EQ(used_memory_after, 0ull); + // Reserved memory should remain the same (pooling) + ASSERT_EQ(reserved_memory_after, 4 * slab_min_size); + + // Clean up + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +} + +TEST_F(test, disjointCtlMemoryMetricsInvalidArgs) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params)); + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + + // Test invalid arguments + size_t value = 0; + + // NULL arg pointer +
ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", NULL, + sizeof(value), poolWrapper.get()), + UMF_RESULT_ERROR_INVALID_ARGUMENT); + + // Size too small + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", &value, + sizeof(size_t) / 2, poolWrapper.get()), + UMF_RESULT_ERROR_INVALID_ARGUMENT); + + // Same tests for reserved_memory + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", NULL, + sizeof(value), poolWrapper.get()), + UMF_RESULT_ERROR_INVALID_ARGUMENT); + + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", &value, + sizeof(size_t) / 2, poolWrapper.get()), + UMF_RESULT_ERROR_INVALID_ARGUMENT); + + // Clean up + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +}