
Commit 51e8169

[disjoint] Add CTL memory used/reserved metrics
1 parent ac85b70 commit 51e8169

2 files changed: +343 -2


src/pool/pool_disjoint.c

Lines changed: 78 additions & 2 deletions
@@ -72,8 +72,84 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx,
     return UMF_RESULT_SUCCESS;
 }
 
-static const umf_ctl_node_t CTL_NODE(disjoint)[] = {CTL_LEAF_RW(name),
-                                                     CTL_NODE_END};
+static umf_result_t CTL_READ_HANDLER(used_memory)(
+    void *ctx, umf_ctl_query_source_t source, void *arg, size_t size,
+    umf_ctl_index_utlist_t *indexes, const char *extra_name,
+    umf_ctl_query_type_t queryType) {
+    (void)source, (void)indexes, (void)queryType, (void)extra_name;
+    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
+
+    if (arg == NULL || size < sizeof(size_t)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    size_t used_memory = 0;
+
+    // Calculate used memory across all buckets
+    for (size_t i = 0; i < pool->buckets_num; i++) {
+        bucket_t *bucket = pool->buckets[i];
+        utils_mutex_lock(&bucket->bucket_lock);
+
+        // Count allocated chunks in available slabs
+        slab_list_item_t *it;
+        for (it = bucket->available_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            used_memory += slab->num_chunks_allocated * bucket->size;
+        }
+
+        // Count allocated chunks in unavailable slabs (all chunks allocated)
+        for (it = bucket->unavailable_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            used_memory += slab->num_chunks_allocated * bucket->size;
+        }
+
+        utils_mutex_unlock(&bucket->bucket_lock);
+    }
+
+    *(size_t *)arg = used_memory;
+    return UMF_RESULT_SUCCESS;
+}
+
+static umf_result_t CTL_READ_HANDLER(reserved_memory)(
+    void *ctx, umf_ctl_query_source_t source, void *arg, size_t size,
+    umf_ctl_index_utlist_t *indexes, const char *extra_name,
+    umf_ctl_query_type_t queryType) {
+    (void)source, (void)indexes, (void)queryType, (void)extra_name;
+    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
+
+    if (arg == NULL || size < sizeof(size_t)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    size_t reserved_memory = 0;
+
+    // Calculate reserved memory across all buckets
+    for (size_t i = 0; i < pool->buckets_num; i++) {
+        bucket_t *bucket = pool->buckets[i];
+        utils_mutex_lock(&bucket->bucket_lock);
+
+        // Count all slabs (both available and unavailable)
+        slab_list_item_t *it;
+        for (it = bucket->available_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            reserved_memory += slab->slab_size;
+        }
+
+        for (it = bucket->unavailable_slabs; it != NULL; it = it->next) {
+            slab_t *slab = it->val;
+            reserved_memory += slab->slab_size;
+        }
+
+        utils_mutex_unlock(&bucket->bucket_lock);
+    }
+
+    *(size_t *)arg = reserved_memory;
+    return UMF_RESULT_SUCCESS;
+}
+
+static const umf_ctl_node_t CTL_NODE(disjoint)[] = {
+    CTL_LEAF_RW(name), CTL_LEAF_RO(used_memory), CTL_LEAF_RO(reserved_memory),
+    CTL_NODE_END};
 
 static void initialize_disjoint_ctl(void) {
     CTL_REGISTER_MODULE(&disjoint_ctl_root, disjoint);
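
For reference, a minimal caller-side sketch of how the two new read-only leaves can be queried; the paths mirror those exercised by the tests below, and `pool` stands in for an existing disjoint pool handle:

    // Sketch only: `pool` is assumed to be a pool created with umfDisjointPoolOps().
    size_t used = 0, reserved = 0;
    umf_result_t ret = umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
                                 pool, &used, sizeof(used));
    if (ret == UMF_RESULT_SUCCESS) {
        ret = umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
                        pool, &reserved, sizeof(reserved));
    }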

test/pools/disjoint_pool_ctl.cpp

Lines changed: 265 additions & 0 deletions
@@ -10,7 +10,10 @@
 #include <umf/pools/pool_disjoint.h>
 #include <umf/providers/provider_os_memory.h>
 
+#include <vector>
+
 #include "base.hpp"
+#include "utils_assert.h"
 #include "utils_log.h"
 
 using umf_test::test;

@@ -152,3 +155,265 @@ TEST_F(test, disjointCtlChangeNameTwice) {
     ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
     ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
 }
+
+TEST_F(test, disjointCtlUsedMemory) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+
+    const size_t slab_min_size = 64 * 1024;
+    umfDisjointPoolParamsSetMinBucketSize(params, slab_min_size);
+
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    // Initially, used memory should be 0
+    size_t used_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+    ASSERT_EQ(used_memory, 0ull);
+
+    // Allocate some memory
+    void *ptr1 = umfPoolMalloc(poolWrapper.get(), 1024ull);
+    ASSERT_NE(ptr1, nullptr);
+
+    // Check that used memory increased
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+    ASSERT_GE(used_memory, 1024ull);
+
+    // Allocate more memory
+    void *ptr2 = umfPoolMalloc(poolWrapper.get(), 2048ull);
+    ASSERT_NE(ptr2, nullptr);
+
+    size_t used_memory2 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory2,
+                             sizeof(used_memory2)));
+    ASSERT_GE(used_memory2, used_memory + 2048ull);
+
+    // Free memory
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr1));
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr2));
+
+    // Check that used memory is equal to 0
+    size_t used_memory3 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory3,
+                             sizeof(used_memory3)));
+    ASSERT_EQ(used_memory3, 0ull);
+
+    // Allocate again at least slab_min_size
+    void *ptr3 = umfPoolMalloc(poolWrapper.get(),
+                               slab_min_size);
+    ASSERT_NE(ptr3, nullptr);
+
+    // Check that used memory increased
+    size_t used_memory4 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory4,
+                             sizeof(used_memory4)));
+    ASSERT_EQ(used_memory4, slab_min_size);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
+
+TEST_F(test, disjointCtlReservedMemory) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    const size_t slab_min_size = 64 * 1024;
+
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+
+    // Set minimum slab size
+    umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size);
+
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    // Initially, reserved memory should be 0
+    size_t reserved_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory,
+                             sizeof(reserved_memory)));
+    ASSERT_EQ(reserved_memory, 0ull);
+
+    // Allocate some memory
+    void *ptr1 = umfPoolMalloc(poolWrapper.get(), 1024ull);
+    ASSERT_NE(ptr1, nullptr);
+
+    // Check that reserved memory increased (should be at least slab_min_size)
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory,
+                             sizeof(reserved_memory)));
+    ASSERT_GE(reserved_memory, slab_min_size);
+
+    void *ptr2 = umfPoolMalloc(poolWrapper.get(), 1024ull);
+    ASSERT_NE(ptr2, nullptr);
+
+    size_t reserved_memory2 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory2,
+                             sizeof(reserved_memory2)));
+    size_t used_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+
+    ASSERT_GE(reserved_memory2, slab_min_size);
+
+    // Free memory - reserved memory should stay the same
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr1));
+    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr2));
+
+    size_t reserved_memory3 = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory3,
+                             sizeof(reserved_memory3)));
+    ASSERT_EQ(reserved_memory3, slab_min_size);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
+
+TEST_F(test, disjointCtlMemoryMetricsConsistency) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    const size_t alloc_size = 512;   // Size of each allocation
+    const size_t n_allocations = 10; // Number of allocations
+
+    // Allocate memory
+    std::vector<void *> ptrs;
+    for (size_t i = 0; i < n_allocations; i++) {
+        void *ptr = umfPoolMalloc(poolWrapper.get(), alloc_size);
+        ASSERT_NE(ptr, nullptr);
+        ptrs.push_back(ptr);
+    }
+
+    // Get memory metrics
+    size_t used_memory = 0;
+    size_t reserved_memory = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory,
+                             sizeof(used_memory)));
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory,
+                             sizeof(reserved_memory)));
+
+    // Used memory should be at least the total allocated
+    ASSERT_GE(used_memory, n_allocations * alloc_size);
+
+    // Reserved memory should be at least the used memory
+    ASSERT_GE(reserved_memory, used_memory);
+
+    // Free all memory
+    for (void *ptr : ptrs) {
+        ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr));
+    }
+
+    // Check metrics after free
+    size_t used_memory_after = 0;
+    size_t reserved_memory_after = 0;
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                             poolWrapper.get(), &used_memory_after,
+                             sizeof(used_memory_after)));
+    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                             poolWrapper.get(), &reserved_memory_after,
+                             sizeof(reserved_memory_after)));
+
+    // Used memory should be 0 after freeing
+    ASSERT_EQ(used_memory_after, 0ull);
+    // Reserved memory should remain the same (pooling)
+    ASSERT_EQ(reserved_memory_after, reserved_memory);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
+
+TEST_F(test, disjointCtlMemoryMetricsInvalidArgs) {
+    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
+    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
+        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
+                                    os_memory_provider_params);
+    if (providerWrapper.get() == NULL) {
+        GTEST_SKIP() << "OS memory provider is not supported!";
+    }
+
+    umf_disjoint_pool_params_handle_t params = nullptr;
+    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
+    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
+                            params);
+
+    // Test invalid arguments
+    size_t value = 0;
+
+    // NULL arg pointer
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                        poolWrapper.get(), NULL, sizeof(value)),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    // Size too small
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
+                        poolWrapper.get(), &value, sizeof(size_t) / 2),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    // Same tests for reserved_memory
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                        poolWrapper.get(), NULL, sizeof(value)),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    ASSERT_EQ(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
+                        poolWrapper.get(), &value, sizeof(int)),
+              UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    // Clean up
+    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
+    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
+}
