Commit d790924

[disjoint] Add CTL memory used/reserved metrics
1 parent ac85b70 commit d790924

2 files changed: +349 -2 lines

src/pool/pool_disjoint.c

Lines changed: 78 additions & 2 deletions
@@ -72,8 +72,84 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx,
     return UMF_RESULT_SUCCESS;
 }
 
-static const umf_ctl_node_t CTL_NODE(disjoint)[] = {CTL_LEAF_RW(name),
-                                                     CTL_NODE_END};
+static umf_result_t CTL_READ_HANDLER(used_memory)(
+    void *ctx, umf_ctl_query_source_t source, void *arg, size_t size,
+    umf_ctl_index_utlist_t *indexes, const char *extra_name,
+    umf_ctl_query_type_t queryType) {
+    (void)source, (void)indexes, (void)queryType, (void)extra_name;
+    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
+
+    if (arg == NULL || size < sizeof(size_t)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    size_t used_memory = 0;
+
+    // Calculate used memory across all buckets
+    for (size_t i = 0; i < pool->buckets_num; i++) {
+        bucket_t *bucket = pool->buckets[i];
+        utils_mutex_lock(&bucket->bucket_lock);
+
+        // Count allocated chunks in available slabs
+        slab_list_item_t *it;
+        for (it = bucket->available_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            used_memory += slab->num_chunks_allocated * bucket->size;
+        }
+
+        // Count allocated chunks in unavailable slabs (all chunks allocated)
+        for (it = bucket->unavailable_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            used_memory += slab->num_chunks_allocated * bucket->size;
+        }
+
+        utils_mutex_unlock(&bucket->bucket_lock);
+    }
+
+    *(size_t *)arg = used_memory;
+    return UMF_RESULT_SUCCESS;
+}
+
+static umf_result_t CTL_READ_HANDLER(reserved_memory)(
+    void *ctx, umf_ctl_query_source_t source, void *arg, size_t size,
+    umf_ctl_index_utlist_t *indexes, const char *extra_name,
+    umf_ctl_query_type_t queryType) {
+    (void)source, (void)indexes, (void)queryType, (void)extra_name;
+    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
+
+    if (arg == NULL || size < sizeof(size_t)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    size_t reserved_memory = 0;
+
+    // Calculate reserved memory across all buckets
+    for (size_t i = 0; i < pool->buckets_num; i++) {
+        bucket_t *bucket = pool->buckets[i];
+        utils_mutex_lock(&bucket->bucket_lock);
+
+        // Count all slabs (both available and unavailable)
+        slab_list_item_t *it;
+        for (it = bucket->available_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            reserved_memory += slab->slab_size;
+        }
+
+        for (it = bucket->unavailable_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            reserved_memory += slab->slab_size;
+        }
+
+        utils_mutex_unlock(&bucket->bucket_lock);
+    }
+
+    *(size_t *)arg = reserved_memory;
+    return UMF_RESULT_SUCCESS;
+}
+
+static const umf_ctl_node_t CTL_NODE(disjoint)[] = {
+    CTL_LEAF_RW(name), CTL_LEAF_RO(used_memory), CTL_LEAF_RO(reserved_memory),
+    CTL_NODE_END};
 
 static void initialize_disjoint_ctl(void) {
     CTL_REGISTER_MODULE(&disjoint_ctl_root, disjoint);
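
The new leaves are exposed under the CTL path umf.pool.by_handle.disjoint.* and can be read with umfCtlGet(), as exercised by the tests below. The following is a minimal C sketch of polling both metrics for an existing pool handle; the header providing umfCtlGet() is an assumption here (it may live elsewhere depending on the UMF version), and error handling is reduced to a single check:

#include <stdio.h>

#include <umf/base.h>             // umf_result_t, UMF_RESULT_SUCCESS
#include <umf/memory_pool.h>      // umf_memory_pool_handle_t
#include <umf/experimental/ctl.h> // assumed location of umfCtlGet()

// Sketch: read the disjoint pool's used/reserved byte counters for `pool`,
// a handle to a pool created with umfDisjointPoolOps().
static void print_disjoint_metrics(umf_memory_pool_handle_t pool) {
    size_t used = 0;
    size_t reserved = 0;

    if (umfCtlGet("umf.pool.by_handle.disjoint.used_memory", pool, &used,
                  sizeof(used)) != UMF_RESULT_SUCCESS ||
        umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory", pool,
                  &reserved, sizeof(reserved)) != UMF_RESULT_SUCCESS) {
        return; // metric not available or invalid argument
    }

    printf("disjoint pool: used=%zu bytes, reserved=%zu bytes\n", used,
           reserved);
}

Note that reserved_memory counts whole slabs held by the pool, including freed slabs retained for reuse, so it can stay flat or exceed used_memory after frees; the tests below assert exactly that behavior.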

test/pools/disjoint_pool_ctl.cpp

Lines changed: 271 additions & 0 deletions
@@ -10,7 +10,10 @@
 #include <umf/pools/pool_disjoint.h>
 #include <umf/providers/provider_os_memory.h>
 
+#include <vector>
+
 #include "base.hpp"
+#include "utils_assert.h"
 #include "utils_log.h"
 
 using umf_test::test;
@@ -152,3 +155,271 @@ TEST_F(test, disjointCtlChangeNameTwice) {
     ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
     ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
 }
+
+TEST_F(test, disjointCtlUsedMemory) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+
+    const size_t slab_min_size = 64 * 1024;
+    umfDisjointPoolParamsSetMinBucketSize(params, slab_min_size);
+
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    // Initially, used memory should be 0
+    size_t used_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+    ASSERT_EQ(used_memory, 0ull);
+
+    // Allocate some memory
+    void *ptr1 = umfPoolMalloc(poolWrapper.get(), 1024ull);
+    ASSERT_NE(ptr1, nullptr);
+
+    // Check that used memory increased
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+    ASSERT_GE(used_memory, 1024ull);
+
+    // Allocate more memory
+    void *ptr2 = umfPoolMalloc(poolWrapper.get(), 2048ull);
+    ASSERT_NE(ptr2, nullptr);
+
+    size_t used_memory2 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory2,
+                             sizeof(used_memory2)));
+    ASSERT_GE(used_memory2, used_memory + 2048ull);
+
+    // Free memory
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr1));
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr2));
+
+    // Check that used memory is equal to 0
+    size_t used_memory3 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory3,
+                             sizeof(used_memory3)));
+    ASSERT_EQ(used_memory3, 0ull);
+
+    // Allocate again at least slab_min_size
+    void *ptr3 = umfPoolMalloc(poolWrapper.get(),
+                               slab_min_size);
+    ASSERT_NE(ptr3, nullptr);
+
+    // Check that used memory increased
+    size_t used_memory4 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory4,
+                             sizeof(used_memory4)));
+    ASSERT_EQ(used_memory4, slab_min_size);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
+
+TEST_F(test, disjointCtlReservedMemory) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    const size_t slab_min_size = 64 * 1024;
+
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+
+    // Set minimum slab size
+    umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size);
+
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    // Initially, reserved memory should be 0
+    size_t reserved_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory,
+                             sizeof(reserved_memory)));
+    ASSERT_EQ(reserved_memory, 0ull);
+
+    // Allocate some memory
+    void *ptr1 = umfPoolMalloc(poolWrapper.get(), 1024ull);
+    ASSERT_NE(ptr1, nullptr);
+
+    // Check that reserved memory increased (should be at least slab_min_size)
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory,
+                             sizeof(reserved_memory)));
+    ASSERT_GE(reserved_memory, slab_min_size);
+
+    void *ptr2 = umfPoolMalloc(poolWrapper.get(), 1024ull);
+    ASSERT_NE(ptr2, nullptr);
+
+    size_t reserved_memory2 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory2,
+                             sizeof(reserved_memory2)));
+    size_t used_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+
+    ASSERT_GE(reserved_memory2, slab_min_size);
+
+    // Free memory - reserved memory should stay the same
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr1));
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr2));
+
+    size_t reserved_memory3 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory3,
+                             sizeof(reserved_memory3)));
+    ASSERT_EQ(reserved_memory3, slab_min_size);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
+
+TEST_F(test, disjointCtlMemoryMetricsConsistency) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+
+    // Set minimum slab size
+    size_t slab_min_size = 64 * 1024;
+    ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size));
+    ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, 4));
+
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    // const size_t alloc_size = 512; // Size of each allocation
+    const size_t n_allocations = 10; // Number of allocations
+
+    // Allocate memory
+    std::vector<void *> ptrs;
+    for (size_t i = 0; i < n_allocations; i++) {
+        void *ptr = umfPoolMalloc(poolWrapper.get(), slab_min_size);
+        ASSERT_NE(ptr, nullptr);
+        ptrs.push_back(ptr);
+    }
+
+    // Get memory metrics
+    size_t used_memory = 0;
+    size_t reserved_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory,
+                             sizeof(reserved_memory)));
+
+    // Used memory should be at least the total allocated
+    ASSERT_GE(used_memory, n_allocations * slab_min_size);
+
+    // Reserved memory should be at least the configured capacity of slabs
+    ASSERT_GE(reserved_memory, 4 * slab_min_size);
+
+    // Free all memory
+    for (void *ptr : ptrs) {
+        ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr));
+    }
+
+    // Check metrics after free
+    size_t used_memory_after = 0;
+    size_t reserved_memory_after = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory_after,
+                             sizeof(used_memory_after)));
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory_after,
+                             sizeof(reserved_memory_after)));
+
+    // Used memory should be 0 after freeing
+    ASSERT_EQ(used_memory_after, 0ull);
+    // Reserved memory should remain the same (pooling)
+    ASSERT_EQ(reserved_memory_after, 4 * slab_min_size);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
+
+TEST_F(test, disjointCtlMemoryMetricsInvalidArgs) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    // Test invalid arguments
+    size_t value = 0;
+
+    // NULL arg pointer
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                        poolWrapper.get(), NULL, sizeof(value)),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    // Size too small
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                        poolWrapper.get(), &value, sizeof(size_t) / 2),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    // Same tests for reserved_memory
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                        poolWrapper.get(), NULL, sizeof(value)),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                        poolWrapper.get(), &value, sizeof(int)),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
