@@ -44,6 +44,7 @@ static umf_result_t CTL_READ_HANDLER(name)(void *ctx,
     disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
 
     if (arg == NULL) {
+        LOG_ERR("arg is NULL");
         return UMF_RESULT_ERROR_INVALID_ARGUMENT;
     }
 
@@ -64,6 +65,7 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx,
     (void)source, (void)indexes, (void)size;
     disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
     if (arg == NULL) {
+        LOG_ERR("arg is NULL");
         return UMF_RESULT_ERROR_INVALID_ARGUMENT;
     }
 
@@ -81,6 +83,7 @@ CTL_READ_HANDLER(used_memory)(void *ctx, umf_ctl_query_source_t source,
     disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
 
     if (arg == NULL || size != sizeof(size_t)) {
+        LOG_ERR("arg is NULL or size is not sizeof(size_t)");
        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
     }
 
@@ -119,6 +122,7 @@ CTL_READ_HANDLER(reserved_memory)(void *ctx, umf_ctl_query_source_t source,
     disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
 
     if (arg == NULL || size != sizeof(size_t)) {
+        LOG_ERR("arg is NULL or size is not sizeof(size_t)");
        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
     }
 
@@ -148,12 +152,180 @@ CTL_READ_HANDLER(reserved_memory)(void *ctx, umf_ctl_query_source_t source,
     return UMF_RESULT_SUCCESS;
 }
 
-static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(used_memory),
-                                                 CTL_LEAF_RO(reserved_memory)};
+static umf_result_t CTL_READ_HANDLER(count)(void *ctx,
+                                            umf_ctl_query_source_t source,
+                                            void *arg, size_t size,
+                                            umf_ctl_index_utlist_t *indexes) {
+    (void)source, (void)indexes;
+
+    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
+    if (arg == NULL || size != sizeof(size_t)) {
+        LOG_ERR("arg is NULL or size is not sizeof(size_t)");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    if (*(size_t *)indexes->arg != SIZE_MAX) {
+        LOG_ERR("to read bucket count, you must call it without bucket id");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+    *(size_t *)arg = pool->buckets_num;
+
+    return UMF_RESULT_SUCCESS;
+}
+
+#define DEFINE_STATS_HANDLER(NAME, MEMBER) \
+    static umf_result_t CTL_READ_HANDLER(NAME)( \
+        void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, \
+        umf_ctl_index_utlist_t *indexes) { \
+        (void)source; \
+        (void)indexes; \
+        disjoint_pool_t *pool = (disjoint_pool_t *)ctx; \
+ \
+        if (arg == NULL || size != sizeof(size_t)) { \
+            LOG_ERR("arg is NULL or size is not sizeof(size_t)"); \
+            return UMF_RESULT_ERROR_INVALID_ARGUMENT; \
+        } \
+ \
+        if (!pool->params.pool_trace) { \
+            LOG_ERR("pool trace is disabled, cannot read " #NAME); \
+            return UMF_RESULT_ERROR_NOT_SUPPORTED; \
+        } \
+ \
+        size_t total = 0; \
+        for (size_t i = 0; i < pool->buckets_num; ++i) { \
+            bucket_t *bucket = pool->buckets[i]; \
+            utils_mutex_lock(&bucket->bucket_lock); \
+            total += bucket->MEMBER; \
+            utils_mutex_unlock(&bucket->bucket_lock); \
+        } \
+ \
+        *(size_t *)arg = total; \
+        return UMF_RESULT_SUCCESS; \
+    }
+
+DEFINE_STATS_HANDLER(alloc_nr, alloc_count)
+DEFINE_STATS_HANDLER(alloc_pool_nr, alloc_pool_count)
+DEFINE_STATS_HANDLER(free_nr, free_count)
+DEFINE_STATS_HANDLER(curr_slabs_in_use, curr_slabs_in_use)
+DEFINE_STATS_HANDLER(curr_slabs_in_pool, curr_slabs_in_pool)
+DEFINE_STATS_HANDLER(max_slabs_in_use, max_slabs_in_use)
+DEFINE_STATS_HANDLER(max_slabs_in_pool, max_slabs_in_pool)
+
+static const umf_ctl_node_t CTL_NODE(stats)[] = {
+    CTL_LEAF_RO(used_memory),
+    CTL_LEAF_RO(reserved_memory),
+    CTL_LEAF_RO(alloc_nr),
+    CTL_LEAF_RO(alloc_pool_nr),
+    CTL_LEAF_RO(free_nr),
+    CTL_LEAF_RO(curr_slabs_in_use),
+    CTL_LEAF_RO(curr_slabs_in_pool),
+    CTL_LEAF_RO(max_slabs_in_use),
+    CTL_LEAF_RO(max_slabs_in_pool),
+    CTL_NODE_END,
+};
+
+#undef DEFINE_STATS_HANDLER
+
+#ifdef UMF_DEVELOPER_MODE
+#define VALIDATE_BUCKETS_NAME(indexes) \
+    if (strcmp("buckets", indexes->name) != 0) { \
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT; \
+    }
+#else
+#define VALIDATE_BUCKETS_NAME(indexes) \
+    do { \
+    } while (0);
+#endif
+
+#define DEFINE_BUCKET_STATS_HANDLER(NAME, MEMBER) \
+    static umf_result_t CTL_READ_HANDLER(NAME, perBucket)( \
+        void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, \
+        umf_ctl_index_utlist_t *indexes) { \
+        (void)source; \
+ \
+        disjoint_pool_t *pool = (disjoint_pool_t *)ctx; \
+        if (arg == NULL || size != sizeof(size_t)) { \
+            LOG_ERR("arg is NULL or size is not sizeof(size_t)"); \
+            return UMF_RESULT_ERROR_INVALID_ARGUMENT; \
+        } \
+ \
+        VALIDATE_BUCKETS_NAME(indexes); \
+        if (strcmp(#MEMBER, "size") != 0 && !pool->params.pool_trace) { \
+            LOG_ERR("pool trace is disabled, cannot read " #NAME); \
+            return UMF_RESULT_ERROR_NOT_SUPPORTED; \
+        } \
+ \
+        size_t idx; \
+        idx = *(size_t *)indexes->arg; \
+ \
+        if (idx >= pool->buckets_num) { \
+            LOG_ERR("bucket id %zu is out of range [0, %zu)", idx, \
+                    pool->buckets_num); \
+            return UMF_RESULT_ERROR_INVALID_ARGUMENT; \
+        } \
+ \
+        bucket_t *bucket = pool->buckets[idx]; \
+        *(size_t *)arg = bucket->MEMBER; \
+ \
+        return UMF_RESULT_SUCCESS; \
+    }
+
+DEFINE_BUCKET_STATS_HANDLER(alloc_nr, alloc_count)
+DEFINE_BUCKET_STATS_HANDLER(alloc_pool_nr, alloc_pool_count)
+DEFINE_BUCKET_STATS_HANDLER(free_nr, free_count)
+DEFINE_BUCKET_STATS_HANDLER(curr_slabs_in_use, curr_slabs_in_use)
+DEFINE_BUCKET_STATS_HANDLER(curr_slabs_in_pool, curr_slabs_in_pool)
+DEFINE_BUCKET_STATS_HANDLER(max_slabs_in_use, max_slabs_in_use)
+DEFINE_BUCKET_STATS_HANDLER(max_slabs_in_pool, max_slabs_in_pool)
+
+static const umf_ctl_node_t CTL_NODE(stats, perBucket)[] = {
+    CTL_LEAF_RO(alloc_nr, perBucket),
+    CTL_LEAF_RO(alloc_pool_nr, perBucket),
+    CTL_LEAF_RO(free_nr, perBucket),
+    CTL_LEAF_RO(curr_slabs_in_use, perBucket),
+    CTL_LEAF_RO(curr_slabs_in_pool, perBucket),
+    CTL_LEAF_RO(max_slabs_in_use, perBucket),
+    CTL_LEAF_RO(max_slabs_in_pool, perBucket),
+    CTL_NODE_END,
+};
+
+// Not a counter, but it is read exactly like the other per-bucket stats, so we can reuse the macro.
+DEFINE_BUCKET_STATS_HANDLER(size, size)
+
+#undef DEFINE_BUCKET_STATS_HANDLER
+
+static const umf_ctl_node_t CTL_NODE(buckets)[] = {
+    CTL_LEAF_RO(count), CTL_LEAF_RO(size, perBucket),
+    CTL_CHILD(stats, perBucket), CTL_NODE_END};
+
+static int bucket_id_parser(const void *arg, void *dest, size_t dest_size) {
+    size_t *out = (size_t *)dest;
+
+    if (arg == NULL) {
+        *out = SIZE_MAX;
+        return 1; // node n
+    }
+
+    int ret = ctl_arg_unsigned(arg, dest, dest_size);
+    if (ret) {
+        *out = SIZE_MAX;
+        return 1;
+    }
+
+    return 0;
+}
+
+static const struct ctl_argument CTL_ARG(buckets) = {
+    sizeof(size_t),
+    {{0, sizeof(size_t), CTL_ARG_TYPE_UNSIGNED_LONG_LONG, bucket_id_parser},
+     CTL_ARG_PARSER_END}};
 
 static void initialize_disjoint_ctl(void) {
     CTL_REGISTER_MODULE(&disjoint_ctl_root, stats);
-    // CTL_REGISTER_MODULE(&disjoint_ctl_root, name);
+    CTL_REGISTER_MODULE(&disjoint_ctl_root, buckets);
+    // TODO: this is a hack; we need a way to register a module as a node with an argument.
+    disjoint_ctl_root.root[disjoint_ctl_root.first_free - 1].arg =
+        &CTL_ARG(buckets);
 }
 
 umf_result_t disjoint_pool_ctl(void *hPool,
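
Review note: every handler generated by DEFINE_STATS_HANDLER follows the same pattern — validate the output buffer, bail out when pool tracing is disabled, then walk every bucket and sum one counter while holding that bucket's lock. The standalone sketch below models only that aggregation step; it is not UMF code, and the names (stat_bucket_t, pool_stats_sum) are hypothetical mocks introduced just for illustration.

// Standalone model of the per-bucket aggregation performed by DEFINE_STATS_HANDLER.
// stat_bucket_t and pool_stats_sum are hypothetical mocks, not UMF APIs.
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock; // stands in for bucket->bucket_lock
    size_t alloc_count;   // stands in for the counter selected by MEMBER
} stat_bucket_t;

// Sum one counter across all buckets, taking each bucket's lock while reading it,
// mirroring the utils_mutex_lock/unlock pairing in the macro above.
static size_t pool_stats_sum(stat_bucket_t *buckets, size_t buckets_num) {
    size_t total = 0;
    for (size_t i = 0; i < buckets_num; ++i) {
        pthread_mutex_lock(&buckets[i].lock);
        total += buckets[i].alloc_count;
        pthread_mutex_unlock(&buckets[i].lock);
    }
    return total;
}

int main(void) {
    stat_bucket_t buckets[3];
    for (size_t i = 0; i < 3; ++i) {
        pthread_mutex_init(&buckets[i].lock, NULL);
        buckets[i].alloc_count = i + 1; // 1 + 2 + 3 = 6
    }
    printf("alloc_nr total: %zu\n", pool_stats_sum(buckets, 3));
    for (size_t i = 0; i < 3; ++i) {
        pthread_mutex_destroy(&buckets[i].lock);
    }
    return 0;
}

Because each counter is read under its own bucket lock and the pool as a whole is never frozen, the reported totals are a best-effort snapshot rather than a globally consistent value, which seems acceptable for statistics leaves.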
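Similarly, bucket_id_parser uses SIZE_MAX as a sentinel for "no bucket id supplied", which is what CTL_READ_HANDLER(count) checks before reporting buckets_num. A rough standalone model of that convention is shown below, assuming the id arrives as a decimal string; parse_bucket_id is a hypothetical helper written only to illustrate the sentinel, whereas the real parser delegates to ctl_arg_unsigned.

// Standalone model of the SIZE_MAX sentinel convention used by bucket_id_parser.
// parse_bucket_id is a hypothetical helper, not a UMF API.
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Returns the parsed bucket index, or SIZE_MAX when no (or an unparsable) id is given.
static size_t parse_bucket_id(const char *arg) {
    if (arg == NULL) {
        return SIZE_MAX; // no id: caller asked for a pool-wide node such as buckets.count
    }
    errno = 0;
    char *end = NULL;
    unsigned long long v = strtoull(arg, &end, 10);
    if (errno != 0 || end == arg || *end != '\0') {
        return SIZE_MAX; // unparsable id falls back to the same sentinel
    }
    return (size_t)v;
}

int main(void) {
    printf("%zu\n", parse_bucket_id("7"));              // 7: per-bucket query
    printf("%d\n", parse_bucket_id(NULL) == SIZE_MAX);  // 1: whole-pool query
    return 0;
}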
0 commit comments