Skip to content

Expose disjoint pool stats through CTL #1487

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/memory_pool.c
Original file line number Diff line number Diff line change
Expand Up @@ -313,7 +313,7 @@ typedef struct by_name_arg_t {
} by_name_arg_t;

// Parses an optional size_t argument. If arg is missing or not an integer, sets *out to SIZE_MAX.
int by_name_index_parser(const void *arg, void *dest, size_t dest_size) {
static int by_name_index_parser(const void *arg, void *dest, size_t dest_size) {
size_t *out = (size_t *)dest;

if (arg == NULL) {
Expand Down
172 changes: 169 additions & 3 deletions src/pool/pool_disjoint.c
Original file line number Diff line number Diff line change
Expand Up @@ -148,12 +148,178 @@ CTL_READ_HANDLER(reserved_memory)(void *ctx, umf_ctl_query_source_t source,
return UMF_RESULT_SUCCESS;
}

static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(used_memory),
CTL_LEAF_RO(reserved_memory)};
// Reads the number of buckets in the pool ("buckets.count") into *arg.
// Must be queried without a bucket id; the parser stores SIZE_MAX in
// indexes->arg when no id was supplied.
static umf_result_t CTL_READ_HANDLER(count)(void *ctx,
                                            umf_ctl_query_source_t source,
                                            void *arg, size_t size,
                                            umf_ctl_index_utlist_t *indexes) {
    (void)source;

    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
    if (arg == NULL || size != sizeof(size_t)) {
        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
    }

    // Defensive: the index list and its parsed argument must exist before
    // we dereference them (the original code dereferenced unconditionally).
    if (indexes == NULL || indexes->arg == NULL) {
        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
    }

    // "count" is a whole-pool quantity: reject queries that name a bucket.
    if (*(size_t *)indexes->arg != SIZE_MAX) {
        LOG_ERR("to read bucket count, you must call it without bucket id");
        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
    }
    *(size_t *)arg = pool->buckets_num;

    return UMF_RESULT_SUCCESS;
}

// Generates an aggregated stats read handler named NAME that sums the MEMBER
// counter over every bucket in the pool and writes the total into *arg
// (a size_t). Requires pool_trace to be enabled. Each bucket's lock is held
// while its counter is read, but the sum is not an atomic snapshot across
// buckets.
#define DEFINE_STATS_HANDLER(NAME, MEMBER)                                     \
    static umf_result_t CTL_READ_HANDLER(NAME)(                                \
        void *ctx, umf_ctl_query_source_t source, void *arg, size_t size,      \
        umf_ctl_index_utlist_t *indexes) {                                     \
        (void)source;                                                          \
        (void)indexes;                                                         \
        disjoint_pool_t *pool = (disjoint_pool_t *)ctx;                        \
                                                                               \
        if (arg == NULL || size != sizeof(size_t)) {                           \
            return UMF_RESULT_ERROR_INVALID_ARGUMENT;                          \
        }                                                                      \
                                                                               \
        if (!pool->params.pool_trace) {                                        \
            LOG_ERR("pool trace is disabled, cannot read " #NAME);             \
            return UMF_RESULT_ERROR_NOT_SUPPORTED;                             \
        }                                                                      \
                                                                               \
        size_t total = 0;                                                      \
        for (size_t i = 0; i < pool->buckets_num; ++i) {                       \
            bucket_t *bucket = pool->buckets[i];                               \
            utils_mutex_lock(&bucket->bucket_lock);                            \
            total += bucket->MEMBER;                                           \
            utils_mutex_unlock(&bucket->bucket_lock);                          \
        }                                                                      \
                                                                               \
        *(size_t *)arg = total;                                                \
        return UMF_RESULT_SUCCESS;                                             \
    }

// Instantiate the aggregated (whole-pool) handlers:
// CTL leaf name -> bucket counter field that gets summed.
DEFINE_STATS_HANDLER(alloc_nr, alloc_count)
DEFINE_STATS_HANDLER(alloc_pool_nr, alloc_pool_count)
DEFINE_STATS_HANDLER(free_nr, free_count)
DEFINE_STATS_HANDLER(curr_slabs_in_use, curr_slabs_in_use)
DEFINE_STATS_HANDLER(curr_slabs_in_pool, curr_slabs_in_pool)
DEFINE_STATS_HANDLER(max_slabs_in_use, max_slabs_in_use)
DEFINE_STATS_HANDLER(max_slabs_in_pool, max_slabs_in_pool)

// Read-only leaves exposed under the pool's "stats" CTL node.
static const umf_ctl_node_t CTL_NODE(stats)[] = {
    CTL_LEAF_RO(used_memory),
    CTL_LEAF_RO(reserved_memory),
    CTL_LEAF_RO(alloc_nr),
    CTL_LEAF_RO(alloc_pool_nr),
    CTL_LEAF_RO(free_nr),
    CTL_LEAF_RO(curr_slabs_in_use),
    CTL_LEAF_RO(curr_slabs_in_pool),
    CTL_LEAF_RO(max_slabs_in_use),
    CTL_LEAF_RO(max_slabs_in_pool),
    CTL_NODE_END,
};

#undef DEFINE_STATS_HANDLER

// Developer-build sanity check that a per-bucket handler is reached through
// the "buckets" CTL node; compiles to nothing otherwise. Both variants expand
// to a single do/while(0) statement (no trailing semicolon) so the macro is
// safe inside if/else chains and the call site supplies the ';'. The previous
// version used a bare `if` in developer mode (dangling-else hazard) and kept
// a trailing semicolon in the release variant, producing `;;` at call sites.
#ifdef UMF_DEVELOPER_MODE
#define VALIDATE_BUCKETS_NAME(indexes)                                         \
    do {                                                                       \
        if (strcmp("buckets", (indexes)->name) != 0) {                         \
            return UMF_RESULT_ERROR_INVALID_ARGUMENT;                          \
        }                                                                      \
    } while (0)
#else
#define VALIDATE_BUCKETS_NAME(indexes)                                         \
    do {                                                                       \
    } while (0)
#endif

// Generates a per-bucket stats read handler NAME (suffix: perBucket) that
// reads the MEMBER field of the bucket selected by the parsed bucket id in
// indexes->arg. The bucket lock is taken for the read so the handler is
// consistent with the aggregated handlers above, which also lock. Counter
// members require pool_trace to be enabled; "size" is always readable.
#define DEFINE_BUCKET_STATS_HANDLER(NAME, MEMBER)                              \
    static umf_result_t CTL_READ_HANDLER(NAME, perBucket)(                     \
        void *ctx, umf_ctl_query_source_t source, void *arg, size_t size,      \
        umf_ctl_index_utlist_t *indexes) {                                     \
        (void)source;                                                          \
                                                                               \
        disjoint_pool_t *pool = (disjoint_pool_t *)ctx;                        \
        if (arg == NULL || size != sizeof(size_t)) {                           \
            LOG_ERR("arg is NULL or size is not sizeof(size_t)");              \
            return UMF_RESULT_ERROR_INVALID_ARGUMENT;                          \
        }                                                                      \
                                                                               \
        VALIDATE_BUCKETS_NAME(indexes);                                        \
        if (strcmp(#MEMBER, "size") != 0 && !pool->params.pool_trace) {        \
            LOG_ERR("pool trace is disabled, cannot read " #NAME);             \
            return UMF_RESULT_ERROR_NOT_SUPPORTED;                             \
        }                                                                      \
                                                                               \
        size_t idx = *(size_t *)indexes->arg;                                  \
        if (idx >= pool->buckets_num) {                                        \
            LOG_ERR("bucket id %zu is out of range [0, %zu)", idx,             \
                    pool->buckets_num);                                        \
            return UMF_RESULT_ERROR_INVALID_ARGUMENT;                          \
        }                                                                      \
                                                                               \
        bucket_t *bucket = pool->buckets[idx];                                 \
        utils_mutex_lock(&bucket->bucket_lock);                                \
        *(size_t *)arg = bucket->MEMBER;                                       \
        utils_mutex_unlock(&bucket->bucket_lock);                              \
                                                                               \
        return UMF_RESULT_SUCCESS;                                             \
    }

// Instantiate the per-bucket counter handlers.
DEFINE_BUCKET_STATS_HANDLER(alloc_nr, alloc_count)
DEFINE_BUCKET_STATS_HANDLER(alloc_pool_nr, alloc_pool_count)
DEFINE_BUCKET_STATS_HANDLER(free_nr, free_count)
DEFINE_BUCKET_STATS_HANDLER(curr_slabs_in_use, curr_slabs_in_use)
DEFINE_BUCKET_STATS_HANDLER(curr_slabs_in_pool, curr_slabs_in_pool)
DEFINE_BUCKET_STATS_HANDLER(max_slabs_in_use, max_slabs_in_use)
DEFINE_BUCKET_STATS_HANDLER(max_slabs_in_pool, max_slabs_in_pool)

// Read-only leaves exposed under "buckets.<id>.stats".
static const umf_ctl_node_t CTL_NODE(stats, perBucket)[] = {
    CTL_LEAF_RO(alloc_nr, perBucket),
    CTL_LEAF_RO(alloc_pool_nr, perBucket),
    CTL_LEAF_RO(free_nr, perBucket),
    CTL_LEAF_RO(curr_slabs_in_use, perBucket),
    CTL_LEAF_RO(curr_slabs_in_pool, perBucket),
    CTL_LEAF_RO(max_slabs_in_use, perBucket),
    CTL_LEAF_RO(max_slabs_in_pool, perBucket),
    CTL_NODE_END,
};

// Not a counter; but it is read exactly like other per-bucket stats, so we can use macro.
// (Bucket size is readable even with pool_trace disabled — see the
// strcmp(#MEMBER, "size") special case inside the macro.)
DEFINE_BUCKET_STATS_HANDLER(size, size)

#undef DEFINE_BUCKET_STATS_HANDLER

// "buckets" node: whole-pool "count", per-bucket "size" leaf, and the
// per-bucket "stats" child node.
static const umf_ctl_node_t CTL_NODE(buckets)[] = {
    CTL_LEAF_RO(count), CTL_LEAF_RO(size, perBucket),
    CTL_CHILD(stats, perBucket), CTL_NODE_END};

// Parses an optional bucket-id argument into *dest as a size_t.
// When no argument is supplied, or ctl_arg_unsigned rejects it, *dest is set
// to the SIZE_MAX sentinel (meaning "no bucket id") and 1 is returned; a
// successful parse stores the value via ctl_arg_unsigned and returns 0.
static int bucket_id_parser(const void *arg, void *dest, size_t dest_size) {
    size_t *id = (size_t *)dest;

    // Happy path: an argument is present and parses as an unsigned integer.
    if (arg != NULL && ctl_arg_unsigned(arg, dest, dest_size) == 0) {
        return 0;
    }

    // Missing or malformed argument — fall back to the sentinel.
    *id = SIZE_MAX;
    return 1;
}

// Argument descriptor for the "buckets" node: an optional unsigned bucket id
// parsed by bucket_id_parser into a size_t (SIZE_MAX when absent).
static const struct ctl_argument CTL_ARG(buckets) = {
    sizeof(size_t),
    {{0, sizeof(size_t), CTL_ARG_TYPE_UNSIGNED_LONG_LONG, bucket_id_parser},
     CTL_ARG_PARSER_END}};

// Registers the disjoint-pool CTL modules ("stats" and "buckets") in the
// module root and attaches the bucket-id argument descriptor to "buckets".
static void initialize_disjoint_ctl(void) {
    CTL_REGISTER_MODULE(&disjoint_ctl_root, stats);
    // CTL_REGISTER_MODULE(&disjoint_ctl_root, name);
    CTL_REGISTER_MODULE(&disjoint_ctl_root, buckets);
    // TODO: this is hack. Need some way to register module as node with argument
    // NOTE(review): assumes CTL_REGISTER_MODULE placed "buckets" at
    // root[first_free - 1]; breaks if registration order changes — confirm.
    disjoint_ctl_root.root[disjoint_ctl_root.first_free - 1].arg =
        &CTL_ARG(buckets);
}

umf_result_t disjoint_pool_ctl(void *hPool,
Expand Down
145 changes: 145 additions & 0 deletions test/pools/disjoint_pool_ctl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -421,3 +421,148 @@ TEST_F(test, disjointCtlMemoryMetricsInvalidArgs) {
ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
}

// Verifies the disjoint pool's bucket CTL surface: bucket count, per-bucket
// sizes, and both aggregated and per-bucket counters across three phases
// (fresh pool, after 10 allocations, after freeing everything). The repeated
// verification loops of the original are factored into two lambdas.
TEST_F(test, disjointCtlBucketStats) {
    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
        GTEST_SKIP() << "OS memory provider is not supported!";
    }

    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
                                    os_memory_provider_params);
    if (providerWrapper.get() == NULL) {
        GTEST_SKIP() << "OS memory provider is not supported!";
    }

    umf_disjoint_pool_params_handle_t params = nullptr;
    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));

    // Set minimum slab size; tracing must be on for the counters to exist.
    size_t slab_min_size = 64 * 1024;
    ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size));
    ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, 4));
    ASSERT_SUCCESS(umfDisjointPoolParamsSetTrace(params, 3));

    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
                            params);

    size_t arg = 0;
    size_t count = 0;
    const size_t alloc_size = 128;
    size_t used_bucket = SIZE_MAX;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.buckets.count", &count,
                             sizeof(count), poolWrapper.get()));
    EXPECT_EQ(count, 57ull);

    auto expected_bucket_size = [](size_t i) -> size_t {
        // Even indexes: 8 << (i/2) => 8,16,32,64,...
        // Odd indexes: 12 << (i/2) => 12,24,48,96,...
        return (i % 2 == 0) ? (size_t(8) << (i / 2)) : (size_t(12) << (i / 2));
    };

    // Check every bucket's size and remember the first bucket large enough
    // to serve alloc_size — that is where the allocations below must land.
    for (size_t i = 0; i < count; i++) {
        ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.buckets.{}.size", &arg,
                                 sizeof(arg), poolWrapper.get(), i));
        EXPECT_EQ(arg, expected_bucket_size(i)) << "Failed for bucket: " << i;
        if (arg >= alloc_size && used_bucket == SIZE_MAX) {
            used_bucket = i; // Find the bucket that matches alloc_size
        }
    }

    using stats_map = std::unordered_map<std::string, size_t>;

    // Verifies the aggregated (whole-pool) stats against expected values.
    // NOTE: ASSERT_* inside a void lambda aborts only the lambda; the test
    // is still marked failed, matching the original inline loops closely.
    auto check_pool_stats = [&](const stats_map &expected) {
        for (const auto &s : expected) {
            ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.{}", &arg,
                                     sizeof(arg), poolWrapper.get(),
                                     s.first.c_str()));
            EXPECT_EQ(arg, s.second) << "Failed for stat: " << s.first;
        }
    };

    // Verifies per-bucket stats: used_bucket must match `expected`, every
    // other bucket must report zeros.
    auto check_bucket_stats = [&](const stats_map &expected) {
        for (size_t i = 0; i < count; i++) {
            for (const auto &s : expected) {
                ASSERT_SUCCESS(
                    umfCtlGet("umf.pool.by_handle.{}.buckets.{}.stats.{}",
                              &arg, sizeof(arg), poolWrapper.get(), i,
                              s.first.c_str()));
                EXPECT_EQ(arg, i == used_bucket ? s.second : 0)
                    << "Failed for stat: " << s.first << " bucket: " << i;
            }
        }
    };

    // Phase 1: fresh pool — all counters must be zero everywhere.
    stats_map stats = {
        {"alloc_nr", 0ull},
        {"alloc_pool_nr", 0ull},
        {"free_nr", 0ull},
        {"curr_slabs_in_use", 0ull},
        {"curr_slabs_in_pool", 0ull},
        {"max_slabs_in_use", 0ull},
        {"max_slabs_in_pool", 0ull},
    };
    check_pool_stats(stats);
    check_bucket_stats(stats);

    const size_t n_allocations = 10; // Number of allocations

    // Allocate memory
    std::vector<void *> ptrs;
    for (size_t i = 0; i < n_allocations; i++) {
        void *ptr = umfPoolMalloc(poolWrapper.get(), alloc_size);
        ASSERT_NE(ptr, nullptr);
        ptrs.push_back(ptr);
    }

    // Phase 2: the first allocation creates a slab, the remaining 9 are
    // served from it (alloc_pool_nr), so exactly one slab is in use.
    stats = {
        {"alloc_nr", 10ull},
        {"alloc_pool_nr", 9ull},
        {"free_nr", 0ull},
        {"curr_slabs_in_use", 1ull},
        {"curr_slabs_in_pool", 0ull},
        {"max_slabs_in_use", 1ull},
        {"max_slabs_in_pool", 0ull},
    };
    check_pool_stats(stats);
    check_bucket_stats(stats);

    // Free all memory
    for (void *ptr : ptrs) {
        ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr));
    }

    // Phase 3: after freeing, the slab moves from "in use" to the pool.
    stats = {
        {"alloc_nr", 10ull},          {"alloc_pool_nr", 9ull},
        {"free_nr", 10ull},           {"curr_slabs_in_use", 0ull},
        {"curr_slabs_in_pool", 1ull}, {"max_slabs_in_use", 1ull},
        {"max_slabs_in_pool", 1ull},
    };
    check_pool_stats(stats);
    check_bucket_stats(stats);

    // Clean up
    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
}
Loading