Skip to content

Commit 500f56b

Browse files
committed
add post-initialize function to pools and providers
The split between the initialize and post-initialize functions is necessary for properly handling CTL defaults.
1 parent 856058d commit 500f56b

17 files changed

+234
-52
lines changed

include/umf/memory_pool_ops.h

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ extern "C" {
2222
/// @brief Version of the Memory Pool ops structure.
2323
/// NOTE: This is equal to the latest UMF version, in which the ops structure
2424
/// has been modified.
25-
#define UMF_POOL_OPS_VERSION_CURRENT UMF_MAKE_VERSION(1, 0)
25+
#define UMF_POOL_OPS_VERSION_CURRENT UMF_MAKE_VERSION(1, 1)
2626

2727
///
2828
/// @brief This structure comprises function pointers used by corresponding umfPool*
@@ -166,6 +166,18 @@ typedef struct umf_memory_pool_ops_t {
166166
const char *name, void *arg, size_t size,
167167
umf_ctl_query_type_t queryType, va_list args);
168168

169+
///
170+
/// @brief Post-initializes memory pool.
171+
/// @param provider memory provider that will be used for coarse-grain allocations.
172+
/// @param params pool-specific params, or NULL for defaults
173+
/// @param pool pointer to the pool's private data, as previously returned by
174+
/// the initialize function (an input here, not an out-parameter)
175+
176+
/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
177+
///
178+
umf_result_t (*ext_post_initialize)(umf_memory_provider_handle_t provider,
179+
const void *params, void *pool);
180+
169181
} umf_memory_pool_ops_t;
170182

171183
#ifdef __cplusplus

include/umf/memory_provider_ops.h

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ extern "C" {
2121
/// @brief Version of the Memory Provider ops structure.
2222
/// NOTE: This is equal to the latest UMF version, in which the ops structure
2323
/// has been modified.
24-
#define UMF_PROVIDER_OPS_VERSION_CURRENT UMF_MAKE_VERSION(1, 0)
24+
#define UMF_PROVIDER_OPS_VERSION_CURRENT UMF_MAKE_VERSION(1, 1)
2525

2626
///
2727
/// @brief This structure comprises function pointers used by corresponding
@@ -278,6 +278,14 @@ typedef struct umf_memory_provider_ops_t {
278278
const char *name, void *arg, size_t size,
279279
umf_ctl_query_type_t queryType, va_list args);
280280

281+
///
282+
/// @brief Post-initializes memory provider.
283+
/// @param params provider-specific params, or NULL for defaults
284+
/// @param provider pointer to the provider
285+
/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
286+
///
287+
umf_result_t (*ext_post_initialize)(const void *params, void *provider);
288+
281289
} umf_memory_provider_ops_t;
282290

283291
#ifdef __cplusplus

src/memory_pool.c

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -200,11 +200,24 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
200200
}
201201

202202
umf_result_t ret = UMF_RESULT_SUCCESS;
203-
204203
if (ops->version != UMF_POOL_OPS_VERSION_CURRENT) {
205204
LOG_WARN("Memory Pool ops version \"%d\" is different than the current "
206205
"version \"%d\"",
207206
ops->version, UMF_POOL_OPS_VERSION_CURRENT);
207+
208+
// Create a new ops compatible structure with the current version
209+
umf_memory_pool_ops_t compatible_ops;
210+
memset(&compatible_ops, 0, sizeof(compatible_ops));
211+
if (UMF_MINOR_VERSION(ops->version) == 0) {
212+
memcpy(&compatible_ops, ops,
213+
offsetof(umf_memory_pool_ops_t, ext_post_initialize));
214+
} else {
215+
LOG_ERR("Memory Pool ops unknown version, which \"%d\" is not "
216+
"supported",
217+
ops->version);
218+
return UMF_RESULT_ERROR_NOT_SUPPORTED;
219+
}
220+
ops = &compatible_ops;
208221
}
209222

210223
umf_memory_pool_handle_t pool =
@@ -261,7 +274,16 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
261274
}
262275
}
263276

277+
if (ops->ext_post_initialize != NULL) {
278+
ret = ops->ext_post_initialize(pool->provider, params, pool->pool_priv);
279+
if (ret != UMF_RESULT_SUCCESS) {
280+
LOG_ERR("Failed to post-initialize pool");
281+
goto err_pool_init;
282+
}
283+
}
284+
264285
*hPool = pool;
286+
265287
LOG_INFO("Memory pool created: %p", (void *)pool);
266288
return UMF_RESULT_SUCCESS;
267289

src/memory_provider.c

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -119,6 +119,13 @@ static umf_result_t umfDefaultCloseIPCHandle(void *provider, void *ptr,
119119
return UMF_RESULT_ERROR_NOT_SUPPORTED;
120120
}
121121

122+
static umf_result_t umfDefaultPostInitialize(const void *params,
123+
void *provider) {
124+
(void)params;
125+
(void)provider;
126+
return UMF_RESULT_SUCCESS;
127+
}
128+
122129
static umf_result_t
123130
umfDefaultCtlHandle(void *provider, umf_ctl_query_source_t operationType,
124131
const char *name, void *arg, size_t size,
@@ -153,6 +160,10 @@ void assignOpsExtDefaults(umf_memory_provider_ops_t *ops) {
153160
if (!ops->ext_ctl) {
154161
ops->ext_ctl = umfDefaultCtlHandle;
155162
}
163+
164+
if (!ops->ext_post_initialize) {
165+
ops->ext_post_initialize = umfDefaultPostInitialize;
166+
}
156167
}
157168

158169
void assignOpsIpcDefaults(umf_memory_provider_ops_t *ops) {
@@ -250,6 +261,16 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops,
250261

251262
provider->provider_priv = provider_priv;
252263

264+
if (provider->ops.ext_post_initialize != NULL) {
265+
ret = provider->ops.ext_post_initialize(params, provider_priv);
266+
if (ret != UMF_RESULT_SUCCESS) {
267+
LOG_ERR("Failed to post-initialize provider");
268+
provider->ops.finalize(provider_priv);
269+
umf_ba_global_free(provider);
270+
return ret;
271+
}
272+
}
273+
253274
*hProvider = provider;
254275

255276
return UMF_RESULT_SUCCESS;

src/pool/pool_disjoint.c

Lines changed: 27 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -35,39 +35,39 @@ static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT;
3535

3636
// Disable name ctl for 1.0 release
3737
#if 0
38-
static umf_result_t CTL_READ_HANDLER(name)(void *ctx,
39-
umf_ctl_query_source_t source,
40-
void *arg, size_t size,
41-
umf_ctl_index_utlist_t *indexes) {
38+
static umf_result_t CTL_READ_HANDLER(name)(void* ctx,
39+
umf_ctl_query_source_t source,
40+
void* arg, size_t size,
41+
umf_ctl_index_utlist_t* indexes) {
4242
(void)source, (void)indexes;
4343

44-
disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
44+
disjoint_pool_t* pool = (disjoint_pool_t*)ctx;
4545

4646
if (arg == NULL) {
4747
return UMF_RESULT_ERROR_INVALID_ARGUMENT;
4848
}
4949

5050
if (size > 0) {
51-
strncpy((char *)arg, pool->params.name, size - 1);
52-
((char *)arg)[size - 1] = '\0';
51+
strncpy((char*)arg, pool->params.name, size - 1);
52+
((char*)arg)[size - 1] = '\0';
5353
}
5454

5555
return UMF_RESULT_SUCCESS;
5656
}
5757

5858
static const struct ctl_argument CTL_ARG(name) = CTL_ARG_STRING(255);
5959

60-
static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx,
61-
umf_ctl_query_source_t source,
62-
void *arg, size_t size,
63-
umf_ctl_index_utlist_t *indexes) {
60+
static umf_result_t CTL_WRITE_HANDLER(name)(void* ctx,
61+
umf_ctl_query_source_t source,
62+
void* arg, size_t size,
63+
umf_ctl_index_utlist_t* indexes) {
6464
(void)source, (void)indexes, (void)size;
65-
disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
65+
disjoint_pool_t* pool = (disjoint_pool_t*)ctx;
6666
if (arg == NULL) {
6767
return UMF_RESULT_ERROR_INVALID_ARGUMENT;
6868
}
6969

70-
strncpy(pool->params.name, (char *)arg, sizeof(pool->params.name) - 1);
70+
strncpy(pool->params.name, (char*)arg, sizeof(pool->params.name) - 1);
7171
pool->params.name[sizeof(pool->params.name) - 1] = '\0';
7272

7373
return UMF_RESULT_SUCCESS;
@@ -758,9 +758,21 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider,
758758
disjoint_pool->provider = provider;
759759
disjoint_pool->params = *dp_params;
760760

761+
*ppPool = (void *)disjoint_pool;
762+
763+
return UMF_RESULT_SUCCESS;
764+
}
765+
766+
umf_result_t
767+
disjoint_pool_post_initialize(umf_memory_provider_handle_t provider,
768+
const void *params, void *ppPool) {
769+
(void)params;
770+
disjoint_pool_t *disjoint_pool = (disjoint_pool_t *)ppPool;
771+
761772
disjoint_pool->known_slabs = critnib_new(free_slab, NULL);
762773
if (disjoint_pool->known_slabs == NULL) {
763-
goto err_free_disjoint_pool;
774+
umf_ba_global_free(disjoint_pool);
775+
return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
764776
}
765777

766778
// Generate buckets sized such as: 64, 96, 128, 192, ..., CutOff.
@@ -821,8 +833,6 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider,
821833
disjoint_pool->provider_min_page_size = 0;
822834
}
823835

824-
*ppPool = (void *)disjoint_pool;
825-
826836
return UMF_RESULT_SUCCESS;
827837

828838
err_free_buckets:
@@ -838,10 +848,6 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider,
838848

839849
err_free_known_slabs:
840850
critnib_delete(disjoint_pool->known_slabs);
841-
842-
err_free_disjoint_pool:
843-
umf_ba_global_free(disjoint_pool);
844-
845851
return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
846852
}
847853

@@ -1146,7 +1152,7 @@ static umf_memory_pool_ops_t UMF_DISJOINT_POOL_OPS = {
11461152
.get_last_allocation_error = disjoint_pool_get_last_allocation_error,
11471153
.get_name = disjoint_pool_get_name,
11481154
.ext_ctl = disjoint_pool_ctl,
1149-
};
1155+
.ext_post_initialize = disjoint_pool_post_initialize};
11501156

11511157
const umf_memory_pool_ops_t *umfDisjointPoolOps(void) {
11521158
return &UMF_DISJOINT_POOL_OPS;

src/pool/pool_jemalloc.c

Lines changed: 45 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ typedef struct jemalloc_memory_pool_t {
6868
umf_memory_provider_handle_t provider;
6969
size_t n_arenas;
7070
char name[64];
71-
unsigned int arena_index[];
71+
unsigned int *arena_index;
7272
} jemalloc_memory_pool_t;
7373

7474
static __TLS umf_result_t TLS_last_allocation_error;
@@ -437,12 +437,29 @@ static void *op_aligned_alloc(void *pool, size_t size, size_t alignment) {
437437

438438
static umf_result_t op_initialize(umf_memory_provider_handle_t provider,
439439
const void *params, void **out_pool) {
440-
assert(provider);
440+
(void)params;
441+
(void)provider;
441442
assert(out_pool);
442443

444+
jemalloc_memory_pool_t *pool = umf_ba_global_alloc(sizeof(*pool));
445+
if (!pool) {
446+
return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
447+
}
448+
memset(pool, 0, sizeof(*pool));
449+
*out_pool = pool;
450+
451+
return UMF_RESULT_SUCCESS;
452+
}
453+
454+
static umf_result_t op_post_initialize(umf_memory_provider_handle_t provider,
455+
const void *params, void *pool) {
456+
assert(provider);
457+
assert(pool);
458+
443459
extent_hooks_t *pHooks = &arena_extent_hooks;
444460
size_t unsigned_size = sizeof(unsigned);
445461
int n_arenas_set_from_params = 0;
462+
jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
446463
int err;
447464
const umf_jemalloc_pool_params_t *jemalloc_params = params;
448465

@@ -454,32 +471,35 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider,
454471

455472
if (n_arenas == 0) {
456473
n_arenas = utils_get_num_cores() * 4;
457-
if (n_arenas > MALLOCX_ARENA_MAX) {
458-
n_arenas = MALLOCX_ARENA_MAX;
459-
}
474+
n_arenas = utils_min(n_arenas, (size_t)MALLOCX_ARENA_MAX);
460475
}
461476

462477
if (n_arenas > MALLOCX_ARENA_MAX) {
463478
LOG_ERR("Number of arenas %zu exceeds the limit (%i).", n_arenas,
464479
MALLOCX_ARENA_MAX);
480+
umf_ba_global_free(je_pool);
465481
return UMF_RESULT_ERROR_INVALID_ARGUMENT;
466482
}
467483

468-
jemalloc_memory_pool_t *pool = umf_ba_global_alloc(
469-
sizeof(*pool) + n_arenas * sizeof(*pool->arena_index));
470-
if (!pool) {
484+
je_pool->arena_index =
485+
umf_ba_global_alloc(n_arenas * sizeof(*je_pool->arena_index));
486+
if (!je_pool->arena_index) {
487+
LOG_ERR("Could not allocate memory for arena indices.");
488+
umf_ba_global_free(je_pool);
471489
return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
472490
}
473-
memset(pool, 0, sizeof(*pool) + n_arenas * sizeof(*pool->arena_index));
491+
492+
memset(je_pool->arena_index, 0, n_arenas * sizeof(*je_pool->arena_index));
493+
474494
const char *pool_name = DEFAULT_NAME;
475495
if (jemalloc_params) {
476496
pool_name = jemalloc_params->name;
477497
}
478498

479-
snprintf(pool->name, sizeof(pool->name), "%s", pool_name);
499+
snprintf(je_pool->name, sizeof(je_pool->name), "%s", pool_name);
480500

481-
pool->provider = provider;
482-
pool->n_arenas = n_arenas;
501+
je_pool->provider = provider;
502+
je_pool->n_arenas = n_arenas;
483503

484504
size_t num_created = 0;
485505
for (size_t i = 0; i < n_arenas; i++) {
@@ -504,13 +524,13 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider,
504524
break;
505525
}
506526

507-
pool->arena_index[num_created++] = arena_index;
527+
je_pool->arena_index[num_created++] = arena_index;
508528
if (arena_index >= MALLOCX_ARENA_MAX) {
509529
LOG_ERR("Number of arenas exceeds the limit.");
510530
goto err_cleanup;
511531
}
512532

513-
pool_by_arena_index[arena_index] = pool;
533+
pool_by_arena_index[arena_index] = je_pool;
514534

515535
// Setup extent_hooks for the newly created arena.
516536
char cmd[64];
@@ -521,21 +541,24 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider,
521541
goto err_cleanup;
522542
}
523543
}
524-
*out_pool = (umf_memory_pool_handle_t)pool;
525544

526-
VALGRIND_DO_CREATE_MEMPOOL(pool, 0, 0);
545+
VALGRIND_DO_CREATE_MEMPOOL(je_pool, 0, 0);
527546

528547
return UMF_RESULT_SUCCESS;
529548

530549
err_cleanup:
531550
// Destroy any arenas that were successfully created.
532551
for (size_t i = 0; i < num_created; i++) {
533552
char cmd[64];
534-
unsigned arena = pool->arena_index[i];
553+
unsigned arena = je_pool->arena_index[i];
535554
snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena);
536555
(void)je_mallctl(cmd, NULL, 0, NULL, 0);
537556
}
538-
umf_ba_global_free(pool);
557+
if (je_pool->arena_index) {
558+
umf_ba_global_free(je_pool->arena_index);
559+
je_pool->arena_index = NULL;
560+
}
561+
umf_ba_global_free(je_pool);
539562
return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC;
540563
}
541564

@@ -552,6 +575,9 @@ static umf_result_t op_finalize(void *pool) {
552575
ret = UMF_RESULT_ERROR_UNKNOWN;
553576
}
554577
}
578+
if (je_pool->arena_index) {
579+
umf_ba_global_free(je_pool->arena_index);
580+
}
555581
umf_ba_global_free(je_pool);
556582

557583
VALGRIND_DO_DESTROY_MEMPOOL(pool);
@@ -600,6 +626,7 @@ static umf_memory_pool_ops_t UMF_JEMALLOC_POOL_OPS = {
600626
.free = op_free,
601627
.get_last_allocation_error = op_get_last_allocation_error,
602628
.get_name = op_get_name,
629+
.ext_post_initialize = op_post_initialize,
603630
};
604631

605632
const umf_memory_pool_ops_t *umfJemallocPoolOps(void) {

0 commit comments

Comments
 (0)