
Commit 1d25c21

[disjoint] Add CTL memory used/reserved metrics
1 parent ac85b70 commit 1d25c21

File tree

2 files changed: 347 additions & 2 deletions

src/pool/pool_disjoint.c

Lines changed: 78 additions & 2 deletions
@@ -72,8 +72,84 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx,
     return UMF_RESULT_SUCCESS;
 }
 
-static const umf_ctl_node_t CTL_NODE(disjoint)[] = {CTL_LEAF_RW(name),
-                                                    CTL_NODE_END};
+static umf_result_t CTL_READ_HANDLER(used_memory)(
+    void *ctx, umf_ctl_query_source_t source, void *arg, size_t size,
+    umf_ctl_index_utlist_t *indexes, const char *extra_name,
+    umf_ctl_query_type_t queryType) {
+    (void)source, (void)indexes, (void)queryType, (void)extra_name;
+    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
+
+    if (arg == NULL || size < sizeof(size_t)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    size_t used_memory = 0;
+
+    // Calculate used memory across all buckets
+    for (size_t i = 0; i < pool->buckets_num; i++) {
+        bucket_t *bucket = pool->buckets[i];
+        utils_mutex_lock(&bucket->bucket_lock);
+
+        // Count allocated chunks in available slabs
+        slab_list_item_t *it;
+        for (it = bucket->available_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            used_memory += slab->num_chunks_allocated * bucket->size;
+        }
+
+        // Count allocated chunks in unavailable slabs (all chunks allocated)
+        for (it = bucket->unavailable_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            used_memory += slab->num_chunks_allocated * bucket->size;
+        }
+
+        utils_mutex_unlock(&bucket->bucket_lock);
+    }
+
+    *(size_t *)arg = used_memory;
+    return UMF_RESULT_SUCCESS;
+}
+
+static umf_result_t CTL_READ_HANDLER(reserved_memory)(
+    void *ctx, umf_ctl_query_source_t source, void *arg, size_t size,
+    umf_ctl_index_utlist_t *indexes, const char *extra_name,
+    umf_ctl_query_type_t queryType) {
+    (void)source, (void)indexes, (void)queryType, (void)extra_name;
+    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
+
+    if (arg == NULL || size < sizeof(size_t)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    size_t reserved_memory = 0;
+
+    // Calculate reserved memory across all buckets
+    for (size_t i = 0; i < pool->buckets_num; i++) {
+        bucket_t *bucket = pool->buckets[i];
+        utils_mutex_lock(&bucket->bucket_lock);
+
+        // Count all slabs (both available and unavailable)
+        slab_list_item_t *it;
+        for (it = bucket->available_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            reserved_memory += slab->slab_size;
+        }
+
+        for (it = bucket->unavailable_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            reserved_memory += slab->slab_size;
+        }
+
+        utils_mutex_unlock(&bucket->bucket_lock);
+    }
+
+    *(size_t *)arg = reserved_memory;
+    return UMF_RESULT_SUCCESS;
+}
+
+static const umf_ctl_node_t CTL_NODE(disjoint)[] = {
+    CTL_LEAF_RW(name), CTL_LEAF_RO(used_memory), CTL_LEAF_RO(reserved_memory),
+    CTL_NODE_END};
 
 static void initialize_disjoint_ctl(void) {
     CTL_REGISTER_MODULE(&disjoint_ctl_root, disjoint);
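
For reference, a minimal caller-side sketch (not part of this commit) of how the two new read-only leaves can be queried through umfCtlGet. The CTL paths and call pattern mirror the tests below; `pool` is assumed to be a umf_memory_pool_handle_t backed by the disjoint pool.

// Hypothetical usage sketch; `pool` is assumed to exist and wrap a disjoint pool.
size_t used = 0;
size_t reserved = 0;

// used_memory: bytes currently handed out to callers
// (sum of num_chunks_allocated * bucket size over all slabs)
umf_result_t ret = umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
                             pool, &used, sizeof(used));

// reserved_memory: bytes of slab memory held by the pool, whether
// currently allocated or cached for reuse (sum of slab_size)
if (ret == UMF_RESULT_SUCCESS) {
    ret = umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
                    pool, &reserved, sizeof(reserved));
}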

test/pools/disjoint_pool_ctl.cpp

Lines changed: 269 additions & 0 deletions
@@ -10,7 +10,10 @@
 #include <umf/pools/pool_disjoint.h>
 #include <umf/providers/provider_os_memory.h>
 
+#include <vector>
+
 #include "base.hpp"
+#include "utils_assert.h"
 #include "utils_log.h"
 
 using umf_test::test;
@@ -152,3 +155,269 @@ TEST_F(test, disjointCtlChangeNameTwice) {
     ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
     ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
 }
+
+TEST_F(test, disjointCtlUsedMemory) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+
+    const size_t slab_min_size = 64 * 1024;
+    umfDisjointPoolParamsSetMinBucketSize(params, slab_min_size);
+
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    // Initially, used memory should be 0
+    size_t used_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+    ASSERT_EQ(used_memory, 0ull);
+
+    // Allocate some memory
+    void *ptr1 = umfPoolMalloc(poolWrapper.get(), 1024ull);
+    ASSERT_NE(ptr1, nullptr);
+
+    // Check that used memory increased
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+    ASSERT_GE(used_memory, 1024ull);
+
+    // Allocate more memory
+    void *ptr2 = umfPoolMalloc(poolWrapper.get(), 2048ull);
+    ASSERT_NE(ptr2, nullptr);
+
+    size_t used_memory2 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory2,
+                             sizeof(used_memory2)));
+    ASSERT_GE(used_memory2, used_memory + 2048ull);
+
+    // Free memory
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr1));
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr2));
+
+    // Check that used memory is equal to 0
+    size_t used_memory3 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory3,
+                             sizeof(used_memory3)));
+    ASSERT_EQ(used_memory3, 0ull);
+
+    // Allocate again at least slab_min_size
+    void *ptr3 = umfPoolMalloc(poolWrapper.get(), slab_min_size);
+    ASSERT_NE(ptr3, nullptr);
+
+    // Check that used memory increased
+    size_t used_memory4 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory4,
+                             sizeof(used_memory4)));
+    ASSERT_EQ(used_memory4, slab_min_size);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
+
+TEST_F(test, disjointCtlReservedMemory) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    const size_t slab_min_size = 64 * 1024;
+
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+
+    // Set minimum slab size
+    umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size);
+
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    // Initially, reserved memory should be 0
+    size_t reserved_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory,
+                             sizeof(reserved_memory)));
+    ASSERT_EQ(reserved_memory, 0ull);
+
+    // Allocate some memory
+    void *ptr1 = umfPoolMalloc(poolWrapper.get(), 1024ull);
+    ASSERT_NE(ptr1, nullptr);
+
+    // Check that reserved memory increased (should be at least slab_min_size)
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory,
+                             sizeof(reserved_memory)));
+    ASSERT_GE(reserved_memory, slab_min_size);
+
+    void *ptr2 = umfPoolMalloc(poolWrapper.get(), 1024ull);
+    ASSERT_NE(ptr2, nullptr);
+
+    size_t reserved_memory2 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory2,
+                             sizeof(reserved_memory2)));
+    size_t used_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+
+    ASSERT_GE(reserved_memory2, slab_min_size);
+
+    // Free memory - reserved memory should stay the same
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr1));
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr2));
+
+    size_t reserved_memory3 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory3,
+                             sizeof(reserved_memory3)));
+    ASSERT_EQ(reserved_memory3, slab_min_size);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
+
+TEST_F(test, disjointCtlMemoryMetricsConsistency) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+
+    // Set minimum slab size
+    size_t slab_min_size = 64 * 1024;
+    ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size));
+    ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, 4));
+
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    const size_t n_allocations = 10; // Number of allocations
+
+    // Allocate memory
+    std::vector<void *> ptrs;
+    for (size_t i = 0; i < n_allocations; i++) {
+        void *ptr = umfPoolMalloc(poolWrapper.get(), slab_min_size);
+        ASSERT_NE(ptr, nullptr);
+        ptrs.push_back(ptr);
+    }
+
+    // Get memory metrics
+    size_t used_memory = 0;
+    size_t reserved_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory,
+                             sizeof(reserved_memory)));
+
+    // Used memory should be at least the total allocated
+    ASSERT_GE(used_memory, n_allocations * slab_min_size);
+
+    // Reserved memory should be at least the used memory
+    ASSERT_GE(reserved_memory, 4 * slab_min_size);
+
+    // Free all memory
+    for (void *ptr : ptrs) {
+        ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr));
+    }
+
+    // Check metrics after free
+    size_t used_memory_after = 0;
+    size_t reserved_memory_after = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory_after,
+                             sizeof(used_memory_after)));
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory_after,
+                             sizeof(reserved_memory_after)));
+
+    // Used memory should be 0 after freeing
+    ASSERT_EQ(used_memory_after, 0ull);
+    // Reserved memory should remain the same (pooling)
+    ASSERT_EQ(reserved_memory_after, 4 * slab_min_size);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
+
+TEST_F(test, disjointCtlMemoryMetricsInvalidArgs) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    // Test invalid arguments
+    size_t value = 0;
+
+    // NULL arg pointer
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                        poolWrapper.get(), NULL, sizeof(value)),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    // Size too small
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                        poolWrapper.get(), &value, sizeof(size_t) / 2),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    // Same tests for reserved_memory
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                        poolWrapper.get(), NULL, sizeof(value)),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                        poolWrapper.get(), &value, sizeof(size_t) / 2),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
