Skip to content

Commit 2c34692 — "add pool alloc counter ctl"

Browse files
committed (1 parent: 3a4a335)

File tree

4 files changed

+205
-8
lines changed

4 files changed

+205
-8
lines changed

src/memory_pool.c

Lines changed: 78 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,9 @@ static UTIL_ONCE_FLAG mem_pool_ctl_initialized = UTIL_ONCE_FLAG_INIT;
3333
char CTL_DEFAULT_ENTRIES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};
3434
char CTL_DEFAULT_VALUES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};
3535

36-
void ctl_init(void) { utils_mutex_init(&ctl_mtx); }
36+
struct ctl umf_pool_ctl_root;
37+
38+
void ctl_init(void);
3739

3840
static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
3941
umf_ctl_query_source_t source,
@@ -43,9 +45,15 @@ static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
4345
umf_ctl_query_type_t queryType) {
4446
(void)indexes, (void)source;
4547
umf_memory_pool_handle_t hPool = (umf_memory_pool_handle_t)ctx;
48+
int ret = ctl_query(&umf_pool_ctl_root, hPool, source, extra_name,
49+
queryType, arg, size);
50+
if (ret == -1 &&
51+
errno == EINVAL) { // node was not found in pool_ctl_root, try to
52+
// query the specific pool directly
53+
hPool->ops.ext_ctl(hPool->pool_priv, source, extra_name, arg, size,
54+
queryType);
55+
}
4656

47-
hPool->ops.ext_ctl(hPool->pool_priv, /*unused*/ 0, extra_name, arg, size,
48-
queryType);
4957
return 0;
5058
}
5159

@@ -96,9 +104,38 @@ static int CTL_SUBTREE_HANDLER(default)(void *ctx,
96104
return 0;
97105
}
98106

107+
static int CTL_READ_HANDLER(alloc_count)(void *ctx,
108+
umf_ctl_query_source_t source,
109+
void *arg, size_t size,
110+
umf_ctl_index_utlist_t *indexes,
111+
const char *extra_name,
112+
umf_ctl_query_type_t query_type) {
113+
/* suppress unused-parameter errors */
114+
(void)source, (void)size, (void)indexes, (void)extra_name, (void)query_type;
115+
116+
size_t *arg_out = arg;
117+
if (ctx == NULL || arg_out == NULL) {
118+
return UMF_RESULT_ERROR_INVALID_ARGUMENT;
119+
}
120+
121+
assert(size == sizeof(size_t));
122+
123+
umf_memory_pool_handle_t pool = (umf_memory_pool_handle_t)ctx;
124+
utils_atomic_load_acquire_size_t(&pool->stats.alloc_count, arg_out);
125+
return 0;
126+
}
127+
128+
static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(alloc_count),
129+
CTL_NODE_END};
130+
99131
umf_ctl_node_t CTL_NODE(pool)[] = {CTL_LEAF_SUBTREE2(by_handle, by_handle_pool),
100132
CTL_LEAF_SUBTREE(default), CTL_NODE_END};
101133

134+
void ctl_init(void) {
135+
utils_mutex_init(&ctl_mtx);
136+
CTL_REGISTER_MODULE(&umf_pool_ctl_root, stats);
137+
}
138+
102139
static umf_result_t umfDefaultCtlPoolHandle(void *hPool, int operationType,
103140
const char *name, void *arg,
104141
size_t size,
@@ -160,6 +197,7 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
160197
pool->flags = flags;
161198
pool->ops = *ops;
162199
pool->tag = NULL;
200+
memset(&pool->stats, 0, sizeof(pool->stats));
163201

164202
if (NULL == pool->ops.ext_ctl) {
165203
pool->ops.ext_ctl = umfDefaultCtlPoolHandle;
@@ -285,23 +323,48 @@ umf_result_t umfPoolCreate(const umf_memory_pool_ops_t *ops,
285323

286324
void *umfPoolMalloc(umf_memory_pool_handle_t hPool, size_t size) {
287325
UMF_CHECK((hPool != NULL), NULL);
288-
return hPool->ops.malloc(hPool->pool_priv, size);
326+
void *ret = hPool->ops.malloc(hPool->pool_priv, size);
327+
if (!ret) {
328+
return ret;
329+
}
330+
331+
utils_atomic_increment_size_t(&hPool->stats.alloc_count);
332+
333+
return ret;
289334
}
290335

291336
void *umfPoolAlignedMalloc(umf_memory_pool_handle_t hPool, size_t size,
292337
size_t alignment) {
293338
UMF_CHECK((hPool != NULL), NULL);
294-
return hPool->ops.aligned_malloc(hPool->pool_priv, size, alignment);
339+
void *ret = hPool->ops.aligned_malloc(hPool->pool_priv, size, alignment);
340+
if (!ret) {
341+
return ret;
342+
}
343+
344+
utils_atomic_increment_size_t(&hPool->stats.alloc_count);
345+
return ret;
295346
}
296347

297348
void *umfPoolCalloc(umf_memory_pool_handle_t hPool, size_t num, size_t size) {
298349
UMF_CHECK((hPool != NULL), NULL);
299-
return hPool->ops.calloc(hPool->pool_priv, num, size);
350+
void *ret = hPool->ops.calloc(hPool->pool_priv, num, size);
351+
if (!ret) {
352+
return ret;
353+
}
354+
355+
utils_atomic_increment_size_t(&hPool->stats.alloc_count);
356+
return ret;
300357
}
301358

302359
void *umfPoolRealloc(umf_memory_pool_handle_t hPool, void *ptr, size_t size) {
303360
UMF_CHECK((hPool != NULL), NULL);
304-
return hPool->ops.realloc(hPool->pool_priv, ptr, size);
361+
void *ret = hPool->ops.realloc(hPool->pool_priv, ptr, size);
362+
if (size == 0 && ret == NULL && ptr != NULL) { // this is free(ptr)
363+
utils_atomic_decrement_size_t(&hPool->stats.alloc_count);
364+
} else if (ptr == NULL && ret != NULL) { // this is malloc(size)
365+
utils_atomic_increment_size_t(&hPool->stats.alloc_count);
366+
}
367+
return ret;
305368
}
306369

307370
size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,
@@ -312,7 +375,14 @@ size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,
312375

313376
umf_result_t umfPoolFree(umf_memory_pool_handle_t hPool, void *ptr) {
314377
UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT);
315-
return hPool->ops.free(hPool->pool_priv, ptr);
378+
umf_result_t ret = hPool->ops.free(hPool->pool_priv, ptr);
379+
380+
if (ret != UMF_RESULT_SUCCESS) {
381+
return ret;
382+
}
383+
384+
utils_atomic_decrement_size_t(&hPool->stats.alloc_count);
385+
return ret;
316386
}
317387

318388
umf_result_t umfPoolGetLastAllocationError(umf_memory_pool_handle_t hPool) {

src/memory_pool_internal.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,10 @@ extern "C" {
2424
#include "base_alloc.h"
2525
#include "utils_concurrency.h"
2626

27+
typedef struct umf_pool_stats {
28+
size_t alloc_count;
29+
} umf_pool_stats_t;
30+
2731
typedef struct umf_memory_pool_t {
2832
void *pool_priv;
2933
umf_pool_create_flags_t flags;
@@ -33,6 +37,8 @@ typedef struct umf_memory_pool_t {
3337

3438
utils_mutex_t lock;
3539
void *tag;
40+
// Memory pool statistics
41+
umf_pool_stats_t stats;
3642

3743
// ops should be the last due to possible change size in the future
3844
umf_memory_pool_ops_t ops;

test/common/pool.hpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -84,6 +84,11 @@ bool isCallocSupported(umf_memory_pool_handle_t hPool) {
8484
}
8585

8686
bool isAlignedAllocSupported(umf_memory_pool_handle_t hPool) {
87+
#ifdef _WIN32
88+
// On Windows, aligned allocation is not supported
89+
return false;
90+
#else
91+
8792
static constexpr size_t allocSize = 8;
8893
static constexpr size_t alignment = 8;
8994
auto *ptr = umfPoolAlignedMalloc(hPool, allocSize, alignment);
@@ -97,6 +102,7 @@ bool isAlignedAllocSupported(umf_memory_pool_handle_t hPool) {
97102
} else {
98103
throw std::runtime_error("AlignedMalloc failed with unexpected error");
99104
}
105+
#endif
100106
}
101107

102108
typedef struct pool_base_t {

test/poolFixtures.hpp

Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
#include <array>
99
#include <cstring>
1010
#include <functional>
11+
#include <list>
1112
#include <random>
1213
#include <string>
1314
#include <thread>
@@ -687,4 +688,118 @@ TEST_P(umfPoolTest, pool_from_ptr_half_size_success) {
687688
#endif /* !_WIN32 */
688689
}
689690

691+
TEST_P(umfPoolTest, ctl_stat_alloc_count) {
692+
umf_memory_pool_handle_t pool_get = pool.get();
693+
const size_t size = 4096;
694+
const size_t num_allocs = 10;
695+
std::list<void *> ptrs;
696+
size_t arg;
697+
auto ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get, &arg,
698+
sizeof(arg));
699+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
700+
ASSERT_EQ(arg, 0);
701+
for (size_t i = 0; i < num_allocs; i++) {
702+
void *ptr = umfPoolMalloc(pool_get, size);
703+
ASSERT_NE(ptr, nullptr);
704+
ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get, &arg,
705+
sizeof(arg));
706+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
707+
ASSERT_EQ(arg, i + 1);
708+
ptrs.push_back(ptr);
709+
}
710+
711+
for (auto &ptr : ptrs) {
712+
umf_result_t umf_result = umfPoolFree(pool_get, ptr);
713+
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
714+
}
715+
ptrs.clear();
716+
ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get, &arg,
717+
sizeof(arg));
718+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
719+
ASSERT_EQ(arg, 0);
720+
721+
if (umf_test::isReallocSupported(pool_get)) {
722+
for (int i = 0; i < 10; i++) {
723+
void *ptr;
724+
if (i % 2 == 0) {
725+
ptr = umfPoolMalloc(pool_get, size);
726+
} else {
727+
ptr = umfPoolRealloc(pool_get, nullptr, size);
728+
}
729+
ASSERT_NE(ptr, nullptr);
730+
ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
731+
&arg, sizeof(arg));
732+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
733+
ASSERT_EQ(arg, i + 1);
734+
ptrs.push_back(ptr);
735+
}
736+
for (auto &ptr : ptrs) {
737+
ptr = umfPoolRealloc(pool_get, ptr, size * 2);
738+
ASSERT_NE(ptr, nullptr);
739+
}
740+
ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get, &arg,
741+
sizeof(arg));
742+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
743+
ASSERT_EQ(arg, num_allocs);
744+
size_t allocs = ptrs.size();
745+
for (auto &ptr : ptrs) {
746+
if (allocs-- % 2 == 0) {
747+
ptr = umfPoolRealloc(pool_get, ptr, 0);
748+
ASSERT_EQ(ptr, nullptr);
749+
} else {
750+
ret = umfPoolFree(pool_get, ptr);
751+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
752+
}
753+
}
754+
ptrs.clear();
755+
ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get, &arg,
756+
sizeof(arg));
757+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
758+
ASSERT_EQ(arg, 0);
759+
}
760+
761+
if (umf_test::isCallocSupported(pool_get)) {
762+
for (int i = 0; i < 10; i++) {
763+
void *ptr = umfPoolCalloc(pool_get, 1, size);
764+
ASSERT_NE(ptr, nullptr);
765+
ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
766+
&arg, sizeof(arg));
767+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
768+
ASSERT_EQ(arg, i + 1);
769+
ptrs.push_back(ptr);
770+
}
771+
772+
for (auto &ptr : ptrs) {
773+
umf_result_t umf_result = umfPoolFree(pool_get, ptr);
774+
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
775+
}
776+
ptrs.clear();
777+
ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get, &arg,
778+
sizeof(arg));
779+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
780+
ASSERT_EQ(arg, 0);
781+
}
782+
if (umf_test::isAlignedAllocSupported(pool_get)) {
783+
for (size_t i = 0; i < num_allocs; i++) {
784+
void *ptr = umfPoolAlignedMalloc(pool_get, size, 4096);
785+
ASSERT_NE(ptr, nullptr);
786+
ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get,
787+
&arg, sizeof(arg));
788+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
789+
ASSERT_EQ(arg, i + 1);
790+
ptrs.push_back(ptr);
791+
}
792+
793+
for (auto &ptr : ptrs) {
794+
umf_result_t umf_result = umfPoolFree(pool_get, ptr);
795+
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
796+
}
797+
798+
ptrs.clear();
799+
ret = umfCtlGet("umf.pool.by_handle.stats.alloc_count", pool_get, &arg,
800+
sizeof(arg));
801+
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
802+
ASSERT_EQ(arg, 0);
803+
}
804+
}
690805
#endif /* UMF_TEST_POOL_FIXTURES_HPP */

0 commit comments

Comments (0)