diff --git a/.github/workflows/reusable_compatibility.yml b/.github/workflows/reusable_compatibility.yml index 573812ba4..52c933abc 100644 --- a/.github/workflows/reusable_compatibility.yml +++ b/.github/workflows/reusable_compatibility.yml @@ -99,8 +99,16 @@ jobs: UMF_LOG: level:warning;flush:debug;output:stderr;pid:no LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ run: | - ctest --verbose -E test_memoryProvider + ctest --verbose -E "test_memoryProvider|test_disjoint_pool" + + - name: Run disabled tests individually with latest UMF libs (warnings enabled) + working-directory: ${{github.workspace}}/tag_version/build + env: + UMF_LOG: level:warning;flush:debug;output:stderr;pid:no + LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ + run: | test/test_memoryProvider --gtest_filter="-*Trace" + test/test_disjoint_pool --gtest_filter="-test.internals" # Browse all folders in the examples directory, build them using the # latest UMF version, and run them, excluding those in the exclude list. @@ -225,8 +233,10 @@ jobs: env: UMF_LOG: level:warning;flush:debug;output:stderr;pid:no run: | + $env:UMF_LOG="level:warning;flush:debug;output:stderr;pid:no" cp ${{github.workspace}}/latest_version/build/bin/Debug/umf.dll ${{github.workspace}}/tag_version/build/bin/Debug/umf.dll - ctest -C Debug --verbose -E test_memoryProvider + ctest -C Debug --verbose -E "test_memoryProvider|test_disjoint_pool" + # Browse all folders in the examples directory, build them using the # latest UMF version, and run them, excluding those in the exclude list. @@ -368,8 +378,16 @@ jobs: UMF_LOG: level:warning;flush:debug;output:stderr;pid:no LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ run: | - ctest --verbose -E test_memoryProvider + ctest --verbose -E "test_memoryProvider|test_disjoint_pool" + + - name: Run disabled tests individually with latest UMF libs (warnings enabled) + working-directory: ${{github.workspace}}/tag_version/build + env: + UMF_LOG: level:warning;flush:debug;output:stderr;pid:no + LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ + run: | test/test_memoryProvider --gtest_filter="-*Trace" + test/test_disjoint_pool --gtest_filter="-test.internals" # Browse all folders in the examples directory, build them using the # latest UMF version, and run them, excluding those in the exclude list. diff --git a/include/umf/memory_pool_ops.h b/include/umf/memory_pool_ops.h index c9628c77e..f9632ffb5 100644 --- a/include/umf/memory_pool_ops.h +++ b/include/umf/memory_pool_ops.h @@ -36,6 +36,9 @@ typedef struct umf_memory_pool_ops_t { /// /// @brief Initializes memory pool. + /// \details + /// * The memory pool implementation *must* allocate the memory pool structure + /// and return it via the \p pool parameter. /// @param provider memory provider that will be used for coarse-grain allocations. /// @param params pool-specific params, or NULL for defaults /// @param pool [out] returns pointer to the pool @@ -191,6 +194,25 @@ typedef struct umf_memory_pool_ops_t { /// failure. /// umf_result_t (*ext_trim_memory)(void *pool, size_t minBytesToKeep); + + /// + /// @brief Post-initializes and sets up the memory pool. + /// Post-construction hook for memory pools, enabling advanced or deferred setup that cannot + /// be done in the initial allocation phase (e.g. setting defaults from CTL). + /// + /// \details + /// * This function *must* be implemented if the pool/provider supports CTL that overrides defaults. 
+ /// * If this function fails, it *must* free any resources that were allocated for the pool. + /// * This function *must* be called after the memory pool has been allocated by the initialize function + /// and is used to perform any additional setup required by the memory pool. + /// * This function *may* be used to set up any additional resources required by the memory pool. + /// * This function *may* be used to set up default values for the memory pool parameters provided by CTL. + /// + /// @param pool pointer to the pool + /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure + /// + umf_result_t (*ext_post_initialize)(void *pool); + } umf_memory_pool_ops_t; #ifdef __cplusplus diff --git a/include/umf/memory_provider_ops.h b/include/umf/memory_provider_ops.h index a520ed889..81d5bfb8a 100644 --- a/include/umf/memory_provider_ops.h +++ b/include/umf/memory_provider_ops.h @@ -322,6 +322,15 @@ typedef struct umf_memory_provider_ops_t { void *provider, umf_memory_property_id_t memory_property_id, size_t *size); + /// @brief Post-initializes memory provider. + /// Post-construction hook for memory providers, enabling advanced or deferred setup that cannot + /// be done in the initial allocation phase (e.g. setting defaults from CTL). + /// + /// @param provider pointer to the provider + /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. + /// + umf_result_t (*ext_post_initialize)(void *provider); + } umf_memory_provider_ops_t; #ifdef __cplusplus diff --git a/src/memory_pool.c b/src/memory_pool.c index 80dd05342..dca4128d3 100644 --- a/src/memory_pool.c +++ b/src/memory_pool.c @@ -463,6 +463,11 @@ static umf_result_t umfDefaultTrimMemory(void *provider, return UMF_RESULT_ERROR_NOT_SUPPORTED; } +static umf_result_t umfDefaultExtPostInitialize(void *pool) { + (void)pool; + return UMF_RESULT_SUCCESS; +} + // logical sum (OR) of all umf_pool_create_flags_t flags static const umf_pool_create_flags_t UMF_POOL_CREATE_FLAG_ALL = UMF_POOL_CREATE_FLAG_OWN_PROVIDER | UMF_POOL_CREATE_FLAG_DISABLE_TRACKING; @@ -495,7 +500,6 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, } umf_result_t ret = UMF_RESULT_SUCCESS; - umf_memory_pool_ops_t compatible_ops; if (ops->version != UMF_POOL_OPS_VERSION_CURRENT) { LOG_WARN("Memory Pool ops version \"%d\" is different than the current " @@ -504,8 +508,8 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, // Create a new ops compatible structure with the current version memset(&compatible_ops, 0, sizeof(compatible_ops)); - if (UMF_MINOR_VERSION(ops->version) == 0) { - LOG_INFO("Detected 1.0 version of Memory Pool ops, " + if (ops->version < UMF_MAKE_VERSION(1, 1)) { + LOG_INFO("Detected 1.0 version or below of Memory Pool ops, " "upgrading to current version"); memcpy(&compatible_ops, ops, offsetof(umf_memory_pool_ops_t, ext_trim_memory)); @@ -547,13 +551,17 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, pool->ops.ext_trim_memory = umfDefaultTrimMemory; } + if (NULL == pool->ops.ext_post_initialize) { + pool->ops.ext_post_initialize = umfDefaultExtPostInitialize; + } + if (NULL == utils_mutex_init(&pool->lock)) { LOG_ERR("Failed to initialize mutex for pool"); ret = UMF_RESULT_ERROR_UNKNOWN; goto err_lock_init; } - ret = ops->initialize(pool->provider, params, &pool->pool_priv); + ret = pool->ops.initialize(pool->provider, params, &pool->pool_priv); if (ret != UMF_RESULT_SUCCESS) { goto err_pool_init; } @@ -579,6 +587,12 @@ static umf_result_t 
umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, } } + ret = pool->ops.ext_post_initialize(pool->pool_priv); + if (ret != UMF_RESULT_SUCCESS) { + LOG_ERR("Failed to post-initialize pool"); + goto err_pool_init; + } + *hPool = pool; pools_by_name_add(pool); diff --git a/src/memory_provider.c b/src/memory_provider.c index 324fa751b..5c4026c4c 100644 --- a/src/memory_provider.c +++ b/src/memory_provider.c @@ -121,6 +121,11 @@ static umf_result_t umfDefaultCloseIPCHandle(void *provider, void *ptr, return UMF_RESULT_ERROR_NOT_SUPPORTED; } +static umf_result_t umfDefaultPostInitialize(void *provider) { + (void)provider; + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + static umf_result_t umfDefaultCtlHandle(void *provider, umf_ctl_query_source_t operationType, const char *name, void *arg, size_t size, @@ -183,6 +188,10 @@ void assignOpsExtDefaults(umf_memory_provider_ops_t *ops) { ops->ext_get_allocation_properties_size = umfDefaultGetAllocationPropertiesSize; } + + if (!ops->ext_post_initialize) { + ops->ext_post_initialize = umfDefaultPostInitialize; + } } void assignOpsIpcDefaults(umf_memory_provider_ops_t *ops) { @@ -310,6 +319,14 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops, provider->provider_priv = provider_priv; + ret = provider->ops.ext_post_initialize(provider_priv); + if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_NOT_SUPPORTED) { + LOG_ERR("Failed to post-initialize provider"); + provider->ops.finalize(provider_priv); + umf_ba_global_free(provider); + return ret; + } + *hProvider = provider; const char *provider_name = NULL; diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c index d1327899e..5e9054c3b 100644 --- a/src/pool/pool_disjoint.c +++ b/src/pool/pool_disjoint.c @@ -758,6 +758,14 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, disjoint_pool->provider = provider; disjoint_pool->params = *dp_params; + *ppPool = (void *)disjoint_pool; + + return UMF_RESULT_SUCCESS; +} + +umf_result_t disjoint_pool_post_initialize(void *ppPool) { + disjoint_pool_t *disjoint_pool = (disjoint_pool_t *)ppPool; + disjoint_pool->known_slabs = critnib_new(free_slab, NULL); if (disjoint_pool->known_slabs == NULL) { goto err_free_disjoint_pool; @@ -816,13 +824,11 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, } umf_result_t ret = umfMemoryProviderGetMinPageSize( - provider, NULL, &disjoint_pool->provider_min_page_size); + disjoint_pool->provider, NULL, &disjoint_pool->provider_min_page_size); if (ret != UMF_RESULT_SUCCESS) { disjoint_pool->provider_min_page_size = 0; } - *ppPool = (void *)disjoint_pool; - return UMF_RESULT_SUCCESS; err_free_buckets: @@ -841,7 +847,6 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, err_free_disjoint_pool: umf_ba_global_free(disjoint_pool); - return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } @@ -1199,7 +1204,7 @@ static umf_memory_pool_ops_t UMF_DISJOINT_POOL_OPS = { .get_name = disjoint_pool_get_name, .ext_ctl = disjoint_pool_ctl, .ext_trim_memory = disjoint_pool_trim_memory, -}; + .ext_post_initialize = disjoint_pool_post_initialize}; const umf_memory_pool_ops_t *umfDisjointPoolOps(void) { return &UMF_DISJOINT_POOL_OPS; diff --git a/src/pool/pool_jemalloc.c b/src/pool/pool_jemalloc.c index 1a029d66d..9a9e6a694 100644 --- a/src/pool/pool_jemalloc.c +++ b/src/pool/pool_jemalloc.c @@ -66,9 +66,10 @@ typedef struct umf_jemalloc_pool_params_t { typedef struct jemalloc_memory_pool_t { umf_memory_provider_handle_t provider; + 
umf_jemalloc_pool_params_t params; size_t n_arenas; char name[64]; - unsigned int arena_index[]; + unsigned int *arena_index; } jemalloc_memory_pool_t; static __TLS umf_result_t TLS_last_allocation_error; @@ -440,11 +441,35 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, assert(provider); assert(out_pool); + jemalloc_memory_pool_t *pool = umf_ba_global_alloc(sizeof(*pool)); + if (!pool) { + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + memset(pool, 0, sizeof(*pool)); + + pool->provider = provider; + if (params) { + pool->params = *(const umf_jemalloc_pool_params_t *)params; + } else { + // Set default values + memset(&pool->params, 0, sizeof(pool->params)); + strncpy(pool->params.name, DEFAULT_NAME, sizeof(pool->params.name) - 1); + } + + *out_pool = pool; + + return UMF_RESULT_SUCCESS; +} + +static umf_result_t op_post_initialize(void *pool) { + assert(pool); + extent_hooks_t *pHooks = &arena_extent_hooks; size_t unsigned_size = sizeof(unsigned); int n_arenas_set_from_params = 0; + jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool; int err; - const umf_jemalloc_pool_params_t *jemalloc_params = params; + const umf_jemalloc_pool_params_t *jemalloc_params = &je_pool->params; size_t n_arenas = 0; if (jemalloc_params) { @@ -454,32 +479,34 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, if (n_arenas == 0) { n_arenas = utils_get_num_cores() * 4; - if (n_arenas > MALLOCX_ARENA_MAX) { - n_arenas = MALLOCX_ARENA_MAX; - } + n_arenas = utils_min(n_arenas, (size_t)MALLOCX_ARENA_MAX); } if (n_arenas > MALLOCX_ARENA_MAX) { LOG_ERR("Number of arenas %zu exceeds the limit (%i).", n_arenas, MALLOCX_ARENA_MAX); + umf_ba_global_free(je_pool); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } - jemalloc_memory_pool_t *pool = umf_ba_global_alloc( - sizeof(*pool) + n_arenas * sizeof(*pool->arena_index)); - if (!pool) { + je_pool->arena_index = + umf_ba_global_alloc(n_arenas * sizeof(*je_pool->arena_index)); + if (!je_pool->arena_index) { + LOG_ERR("Could not allocate memory for arena indices."); + umf_ba_global_free(je_pool); return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } - memset(pool, 0, sizeof(*pool) + n_arenas * sizeof(*pool->arena_index)); + + memset(je_pool->arena_index, 0, n_arenas * sizeof(*je_pool->arena_index)); + const char *pool_name = DEFAULT_NAME; if (jemalloc_params) { pool_name = jemalloc_params->name; } - snprintf(pool->name, sizeof(pool->name), "%s", pool_name); + snprintf(je_pool->name, sizeof(je_pool->name), "%s", pool_name); - pool->provider = provider; - pool->n_arenas = n_arenas; + je_pool->n_arenas = n_arenas; size_t num_created = 0; for (size_t i = 0; i < n_arenas; i++) { @@ -504,13 +531,13 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, break; } - pool->arena_index[num_created++] = arena_index; + je_pool->arena_index[num_created++] = arena_index; if (arena_index >= MALLOCX_ARENA_MAX) { LOG_ERR("Number of arenas exceeds the limit."); goto err_cleanup; } - pool_by_arena_index[arena_index] = pool; + pool_by_arena_index[arena_index] = je_pool; // Setup extent_hooks for the newly created arena. 
char cmd[64]; @@ -521,9 +548,8 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, goto err_cleanup; } } - *out_pool = (umf_memory_pool_handle_t)pool; - VALGRIND_DO_CREATE_MEMPOOL(pool, 0, 0); + VALGRIND_DO_CREATE_MEMPOOL(je_pool, 0, 0); return UMF_RESULT_SUCCESS; @@ -531,11 +557,15 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, // Destroy any arenas that were successfully created. for (size_t i = 0; i < num_created; i++) { char cmd[64]; - unsigned arena = pool->arena_index[i]; + unsigned arena = je_pool->arena_index[i]; snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena); (void)je_mallctl(cmd, NULL, 0, NULL, 0); } - umf_ba_global_free(pool); + if (je_pool->arena_index) { + umf_ba_global_free(je_pool->arena_index); + je_pool->arena_index = NULL; + } + umf_ba_global_free(je_pool); return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC; } @@ -552,6 +582,9 @@ static umf_result_t op_finalize(void *pool) { ret = UMF_RESULT_ERROR_UNKNOWN; } } + if (je_pool->arena_index) { + umf_ba_global_free(je_pool->arena_index); + } umf_ba_global_free(je_pool); VALGRIND_DO_DESTROY_MEMPOOL(pool); @@ -623,6 +656,7 @@ static umf_memory_pool_ops_t UMF_JEMALLOC_POOL_OPS = { .get_last_allocation_error = op_get_last_allocation_error, .get_name = op_get_name, .ext_trim_memory = op_trim_memory, + .ext_post_initialize = op_post_initialize, }; const umf_memory_pool_ops_t *umfJemallocPoolOps(void) { diff --git a/src/pool/pool_proxy.c b/src/pool/pool_proxy.c index 6e256c491..a40c3232c 100644 --- a/src/pool/pool_proxy.c +++ b/src/pool/pool_proxy.c @@ -39,6 +39,11 @@ proxy_pool_initialize(umf_memory_provider_handle_t hProvider, return UMF_RESULT_SUCCESS; } +static umf_result_t proxy_pool_post_initialize(void *ppPool) { + (void)ppPool; + return UMF_RESULT_SUCCESS; +} + static umf_result_t proxy_pool_finalize(void *pool) { umf_ba_global_free(pool); return UMF_RESULT_SUCCESS; @@ -160,6 +165,7 @@ static umf_memory_pool_ops_t UMF_PROXY_POOL_OPS = { .get_last_allocation_error = proxy_get_last_allocation_error, .get_name = proxy_get_name, .ext_trim_memory = NULL, // not supported + .ext_post_initialize = proxy_pool_post_initialize, }; const umf_memory_pool_ops_t *umfProxyPoolOps(void) { diff --git a/src/pool/pool_scalable.c b/src/pool/pool_scalable.c index 72afce267..ef667df38 100644 --- a/src/pool/pool_scalable.c +++ b/src/pool/pool_scalable.c @@ -71,6 +71,7 @@ typedef struct tbb_callbacks_t { typedef struct tbb_memory_pool_t { umf_memory_provider_handle_t mem_provider; + umf_scalable_pool_params_t params; void *tbb_pool; char name[64]; } tbb_memory_pool_t; @@ -291,6 +292,33 @@ umfScalablePoolParamsSetName(umf_scalable_pool_params_handle_t hParams, static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, const void *params, void **pool) { + tbb_memory_pool_t *pool_data = + umf_ba_global_alloc(sizeof(tbb_memory_pool_t)); + if (!pool_data) { + LOG_ERR("cannot allocate memory for metadata"); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + + memset(pool_data, 0, sizeof(*pool_data)); + pool_data->mem_provider = provider; + + if (params) { + pool_data->params = *(const umf_scalable_pool_params_t *)params; + } else { + // Set default values + memset(&pool_data->params, 0, sizeof(pool_data->params)); + pool_data->params.granularity = DEFAULT_GRANULARITY; + pool_data->params.keep_all_memory = false; + strncpy(pool_data->params.name, DEFAULT_NAME, + sizeof(pool_data->params.name) - 1); + } + + *pool = (void *)pool_data; + + return UMF_RESULT_SUCCESS; +} + +static 
umf_result_t tbb_pool_post_initialize(void *pool) { tbb_mem_pool_policy_t policy = {.pAlloc = tbb_raw_alloc_wrapper, .pFree = tbb_raw_free_wrapper, .granularity = DEFAULT_GRANULARITY, @@ -299,22 +327,19 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, .keep_all_memory = false, .reserved = 0}; - const char *pool_name = DEFAULT_NAME; - // If params is provided, override defaults - if (params) { - const umf_scalable_pool_params_t *scalable_params = params; - policy.granularity = scalable_params->granularity; - policy.keep_all_memory = scalable_params->keep_all_memory; - pool_name = scalable_params->name; - } - - tbb_memory_pool_t *pool_data = - umf_ba_global_alloc(sizeof(tbb_memory_pool_t)); + tbb_memory_pool_t *pool_data = (tbb_memory_pool_t *)pool; if (!pool_data) { LOG_ERR("cannot allocate memory for metadata"); return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } - memset(pool_data, 0, sizeof(*pool_data)); + + const umf_scalable_pool_params_t *scalable_params = &pool_data->params; + const char *pool_name = scalable_params->name; + + // Use stored params + policy.granularity = scalable_params->granularity; + policy.keep_all_memory = scalable_params->keep_all_memory; + snprintf(pool_data->name, sizeof(pool_data->name), "%s", pool_name); umf_result_t res = UMF_RESULT_SUCCESS; @@ -325,7 +350,6 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, goto err_tbb_init; } - pool_data->mem_provider = provider; ret = tbb_callbacks.pool_create_v1((intptr_t)pool_data, &policy, &(pool_data->tbb_pool)); if (ret != 0 /* TBBMALLOC_OK */) { @@ -333,12 +357,9 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, goto err_tbb_init; } - *pool = (void *)pool_data; - return res; err_tbb_init: - umf_ba_global_free(pool_data); return res; } @@ -489,6 +510,7 @@ static umf_memory_pool_ops_t UMF_SCALABLE_POOL_OPS = { .ext_ctl = pool_ctl, .get_name = scalable_get_name, .ext_trim_memory = NULL, // not supported + .ext_post_initialize = tbb_pool_post_initialize, }; const umf_memory_pool_ops_t *umfScalablePoolOps(void) { diff --git a/src/provider/provider_cuda.c b/src/provider/provider_cuda.c index b6c15d2d1..f8a8d9405 100644 --- a/src/provider/provider_cuda.c +++ b/src/provider/provider_cuda.c @@ -375,13 +375,31 @@ static umf_result_t cu_memory_provider_initialize(const void *params, snprintf(cu_provider->name, sizeof(cu_provider->name), "%s", cu_params->name); + cu_provider->context = cu_params->cuda_context_handle; + cu_provider->device = cu_params->cuda_device_handle; + cu_provider->memory_type = cu_params->memory_type; + cu_provider->alloc_flags = cu_params->alloc_flags; + + *provider = cu_provider; + + return UMF_RESULT_SUCCESS; +} + +static umf_result_t cu_memory_provider_finalize(void *provider) { + umf_ba_global_free(provider); + return UMF_RESULT_SUCCESS; +} + +static umf_result_t cu_memory_provider_post_initialize(void *provider) { + cu_memory_provider_t *cu_provider = (cu_memory_provider_t *)provider; + // CUDA alloc functions doesn't allow to provide user alignment - get the // minimum one from the driver size_t min_alignment = 0; CUmemAllocationProp allocProps = {0}; allocProps.location.type = CU_MEM_LOCATION_TYPE_DEVICE; allocProps.type = CU_MEM_ALLOCATION_TYPE_PINNED; - allocProps.location.id = cu_params->cuda_device_handle; + allocProps.location.id = cu_provider->device; CUresult cu_result = g_cu_ops.cuMemGetAllocationGranularity( &min_alignment, &allocProps, CU_MEM_ALLOC_GRANULARITY_MINIMUM); if (cu_result != 
CUDA_SUCCESS) { @@ -389,29 +407,16 @@ static umf_result_t cu_memory_provider_initialize(const void *params, return cu2umf_result(cu_result); } - cu_provider->context = cu_params->cuda_context_handle; - cu_provider->device = cu_params->cuda_device_handle; - cu_provider->memory_type = cu_params->memory_type; cu_provider->min_alignment = min_alignment; // If the memory type is shared (CUDA managed), the allocation flags must // be set. NOTE: we do not check here if the flags are valid - // this will be done by CUDA runtime. - if (cu_params->memory_type == UMF_MEMORY_TYPE_SHARED && - cu_params->alloc_flags == 0) { + if (cu_provider->memory_type == UMF_MEMORY_TYPE_SHARED && + cu_provider->alloc_flags == 0) { // the default setting is CU_MEM_ATTACH_GLOBAL cu_provider->alloc_flags = CU_MEM_ATTACH_GLOBAL; - } else { - cu_provider->alloc_flags = cu_params->alloc_flags; } - - *provider = cu_provider; - - return UMF_RESULT_SUCCESS; -} - -static umf_result_t cu_memory_provider_finalize(void *provider) { - umf_ba_global_free(provider); return UMF_RESULT_SUCCESS; } @@ -829,6 +834,7 @@ static umf_memory_provider_ops_t UMF_CUDA_MEMORY_PROVIDER_OPS = { cu_memory_provider_get_allocation_properties, .ext_get_allocation_properties_size = cu_memory_provider_get_allocation_properties_size, + .ext_post_initialize = cu_memory_provider_post_initialize, }; const umf_memory_provider_ops_t *umfCUDAMemoryProviderOps(void) { diff --git a/src/provider/provider_devdax_memory.c b/src/provider/provider_devdax_memory.c index 7ddf3c72a..7834b7448 100644 --- a/src/provider/provider_devdax_memory.c +++ b/src/provider/provider_devdax_memory.c @@ -589,6 +589,12 @@ static umf_result_t devdax_free(void *provider, void *ptr, size_t size) { return ret; } +static umf_result_t devdax_post_initialize(void *provider) { + (void)provider; + // For initial version, just return success + return UMF_RESULT_SUCCESS; +} + static umf_memory_provider_ops_t UMF_DEVDAX_MEMORY_PROVIDER_OPS = { .version = UMF_PROVIDER_OPS_VERSION_CURRENT, .initialize = devdax_initialize, @@ -608,7 +614,8 @@ static umf_memory_provider_ops_t UMF_DEVDAX_MEMORY_PROVIDER_OPS = { .ext_put_ipc_handle = devdax_put_ipc_handle, .ext_open_ipc_handle = devdax_open_ipc_handle, .ext_close_ipc_handle = devdax_close_ipc_handle, - .ext_ctl = devdax_ctl}; + .ext_ctl = devdax_ctl, + .ext_post_initialize = devdax_post_initialize}; const umf_memory_provider_ops_t *umfDevDaxMemoryProviderOps(void) { return &UMF_DEVDAX_MEMORY_PROVIDER_OPS; diff --git a/src/provider/provider_file_memory.c b/src/provider/provider_file_memory.c index bff4034b2..a6386aa44 100644 --- a/src/provider/provider_file_memory.c +++ b/src/provider/provider_file_memory.c @@ -295,6 +295,25 @@ static umf_result_t file_initialize(const void *params, void **provider) { file_provider->coarse = coarse; + *provider = file_provider; + return UMF_RESULT_SUCCESS; + +err_close_fd: + utils_close_fd(file_provider->fd); +err_free_file_provider: + umf_ba_global_free(file_provider); + return ret; +} + +static umf_result_t file_post_initialize(void *provider) { + umf_result_t ret = UMF_RESULT_SUCCESS; + file_memory_provider_t *file_provider = provider; + + if (file_provider == NULL) { + LOG_ERR("file provider is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (utils_mutex_init(&file_provider->lock) == NULL) { LOG_ERR("lock init failed"); ret = UMF_RESULT_ERROR_UNKNOWN; @@ -315,20 +334,14 @@ static umf_result_t file_initialize(const void *params, void **provider) { goto err_delete_fd_offset_map; } - *provider = 
file_provider; - return UMF_RESULT_SUCCESS; -err_delete_fd_offset_map: - critnib_delete(file_provider->fd_offset_map); -err_mutex_destroy_not_free: - utils_mutex_destroy_not_free(&file_provider->lock); err_coarse_delete: coarse_delete(file_provider->coarse); -err_close_fd: - utils_close_fd(file_provider->fd); -err_free_file_provider: - umf_ba_global_free(file_provider); +err_mutex_destroy_not_free: + utils_mutex_destroy_not_free(&file_provider->lock); +err_delete_fd_offset_map: + critnib_delete(file_provider->fd_offset_map); return ret; } @@ -935,7 +948,8 @@ static umf_memory_provider_ops_t UMF_FILE_MEMORY_PROVIDER_OPS = { .ext_put_ipc_handle = file_put_ipc_handle, .ext_open_ipc_handle = file_open_ipc_handle, .ext_close_ipc_handle = file_close_ipc_handle, - .ext_ctl = file_ctl}; + .ext_ctl = file_ctl, + .ext_post_initialize = file_post_initialize}; const umf_memory_provider_ops_t *umfFileMemoryProviderOps(void) { return &UMF_FILE_MEMORY_PROVIDER_OPS; diff --git a/src/provider/provider_fixed_memory.c b/src/provider/provider_fixed_memory.c index d761a4024..b7cb713f3 100644 --- a/src/provider/provider_fixed_memory.c +++ b/src/provider/provider_fixed_memory.c @@ -141,20 +141,10 @@ static umf_result_t fixed_initialize(const void *params, void **provider) { fixed_provider->base = in_params->ptr; fixed_provider->size = in_params->size; - // add the entire memory as a single block - ret = coarse_add_memory_fixed(coarse, fixed_provider->base, - fixed_provider->size); - if (ret != UMF_RESULT_SUCCESS) { - LOG_ERR("adding memory block failed"); - goto err_coarse_delete; - } - *provider = fixed_provider; return UMF_RESULT_SUCCESS; -err_coarse_delete: - coarse_delete(fixed_provider->coarse); err_free_fixed_provider: umf_ba_global_free(fixed_provider); return ret; @@ -297,6 +287,20 @@ static umf_result_t fixed_free(void *provider, void *ptr, size_t size) { return ret; } +static umf_result_t fixed_post_initialize(void *provider) { + fixed_memory_provider_t *fixed_provider = + (fixed_memory_provider_t *)provider; + umf_result_t ret = coarse_add_memory_fixed( + fixed_provider->coarse, fixed_provider->base, fixed_provider->size); + if (ret != UMF_RESULT_SUCCESS) { + LOG_ERR("adding memory block failed"); + coarse_delete(fixed_provider->coarse); + umf_ba_global_free(fixed_provider); + return ret; + } + return UMF_RESULT_SUCCESS; +} + static umf_result_t fixed_ctl(void *provider, umf_ctl_query_source_t operationType, const char *name, void *arg, size_t size, @@ -325,7 +329,8 @@ static umf_memory_provider_ops_t UMF_FIXED_MEMORY_PROVIDER_OPS = { .ext_put_ipc_handle = NULL, .ext_open_ipc_handle = NULL, .ext_close_ipc_handle = NULL, - .ext_ctl = fixed_ctl}; + .ext_ctl = fixed_ctl, + .ext_post_initialize = fixed_post_initialize}; const umf_memory_provider_ops_t *umfFixedMemoryProviderOps(void) { return &UMF_FIXED_MEMORY_PROVIDER_OPS; diff --git a/src/provider/provider_level_zero.c b/src/provider/provider_level_zero.c index d5ab3e8e4..fbde61d8e 100644 --- a/src/provider/provider_level_zero.c +++ b/src/provider/provider_level_zero.c @@ -954,6 +954,7 @@ static umf_memory_provider_ops_t UMF_LEVEL_ZERO_MEMORY_PROVIDER_OPS = { ze_memory_provider_get_allocation_properties, .ext_get_allocation_properties_size = ze_memory_provider_get_allocation_properties_size, + .ext_post_initialize = NULL, }; const umf_memory_provider_ops_t *umfLevelZeroMemoryProviderOps(void) { diff --git a/src/provider/provider_os_memory.c b/src/provider/provider_os_memory.c index 7e003484f..d4208d61f 100644 --- a/src/provider/provider_os_memory.c +++ 
b/src/provider/provider_os_memory.c @@ -602,20 +602,6 @@ static umf_result_t os_initialize(const void *params, void **provider) { } } - os_provider->nodeset_str_buf = umf_ba_global_alloc(NODESET_STR_BUF_LEN); - if (!os_provider->nodeset_str_buf) { - LOG_INFO("allocating memory for printing NUMA nodes failed"); - } else { - LOG_INFO("OS provider initialized with NUMA nodes:"); - for (unsigned i = 0; i < os_provider->nodeset_len; i++) { - if (hwloc_bitmap_list_snprintf(os_provider->nodeset_str_buf, - NODESET_STR_BUF_LEN, - os_provider->nodeset[i])) { - LOG_INFO("%s", os_provider->nodeset_str_buf); - } - } - } - *provider = os_provider; return UMF_RESULT_SUCCESS; @@ -1391,6 +1377,25 @@ static umf_result_t os_ctl(void *hProvider, query_type, arg, size, args); } +static umf_result_t os_post_initialize(void *provider) { + os_memory_provider_t *os_provider = (os_memory_provider_t *)provider; + + os_provider->nodeset_str_buf = umf_ba_global_alloc(NODESET_STR_BUF_LEN); + if (!os_provider->nodeset_str_buf) { + LOG_INFO("allocating memory for printing NUMA nodes failed"); + } else { + LOG_INFO("OS provider initialized with NUMA nodes:"); + for (unsigned i = 0; i < os_provider->nodeset_len; i++) { + if (hwloc_bitmap_list_snprintf(os_provider->nodeset_str_buf, + NODESET_STR_BUF_LEN, + os_provider->nodeset[i])) { + LOG_INFO("%s", os_provider->nodeset_str_buf); + } + } + } + return UMF_RESULT_SUCCESS; +} + static umf_memory_provider_ops_t UMF_OS_MEMORY_PROVIDER_OPS = { .version = UMF_PROVIDER_OPS_VERSION_CURRENT, .initialize = os_initialize, @@ -1411,6 +1416,7 @@ static umf_memory_provider_ops_t UMF_OS_MEMORY_PROVIDER_OPS = { .ext_open_ipc_handle = os_open_ipc_handle, .ext_close_ipc_handle = os_close_ipc_handle, .ext_ctl = os_ctl, + .ext_post_initialize = os_post_initialize, }; const umf_memory_provider_ops_t *umfOsMemoryProviderOps(void) { diff --git a/src/provider/provider_tracking.c b/src/provider/provider_tracking.c index cf76d2be7..817def113 100644 --- a/src/provider/provider_tracking.c +++ b/src/provider/provider_tracking.c @@ -959,6 +959,12 @@ static umf_result_t trackingInitialize(const void *params, void **ret) { return UMF_RESULT_SUCCESS; } +static umf_result_t trackingPostInitialize(void *provider) { + (void)provider; + // For initial version, just return success + return UMF_RESULT_SUCCESS; +} + #ifndef NDEBUG static void check_if_tracker_is_empty(umf_memory_tracker_handle_t hTracker, umf_memory_pool_handle_t pool) { @@ -1358,7 +1364,7 @@ umf_memory_provider_ops_t UMF_TRACKING_MEMORY_PROVIDER_OPS = { .ext_ctl = NULL, .ext_get_allocation_properties = trackingGetAllocationProperties, .ext_get_allocation_properties_size = trackingGetAllocationPropertiesSize, -}; + .ext_post_initialize = trackingPostInitialize}; static void free_ipc_cache_value(void *unused, void *ipc_cache_value) { (void)unused; diff --git a/test/pools/disjoint_pool.cpp b/test/pools/disjoint_pool.cpp index c638bfc3e..921e6511f 100644 --- a/test/pools/disjoint_pool.cpp +++ b/test/pools/disjoint_pool.cpp @@ -64,6 +64,7 @@ TEST_F(test, internals) { disjoint_pool_t *pool; umf_result_t res = ops->initialize(provider_handle, params, (void **)&pool); + res = ops->ext_post_initialize((void *)pool); EXPECT_EQ(res, UMF_RESULT_SUCCESS); EXPECT_NE(pool, nullptr); EXPECT_EQ(pool->provider_min_page_size, (size_t)1024); @@ -315,6 +316,8 @@ TEST_F(test, disjointPoolTrim) { EXPECT_EQ(res, UMF_RESULT_SUCCESS); EXPECT_NE(pool, nullptr); + res = ops->ext_post_initialize((void *)pool); + // do 4 allocs, then free all of them size_t size = 64; void 
*ptrs[4] = {0};
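
Note (illustrative, not part of the patch above): a minimal sketch of how a third-party pool could adopt the new ext_post_initialize hook, splitting setup between initialize (allocate and record state) and the deferred post-initialization step. It relies on the ops fields and signatures visible in this diff (version, initialize, ext_post_initialize, and the op_finalize-style finalize); the my_pool_* names and the 4096 value are hypothetical, and the core allocation ops are omitted for brevity.

#include <stdlib.h>
#include <umf/memory_pool_ops.h>

/* Hypothetical pool private data. */
typedef struct my_pool_t {
    umf_memory_provider_handle_t provider;
    size_t page_size; /* filled in during post-initialization */
} my_pool_t;

/* initialize(): allocate the pool structure and return it via *pool,
 * keeping only cheap bookkeeping here. */
static umf_result_t my_pool_initialize(umf_memory_provider_handle_t provider,
                                       const void *params, void **pool) {
    (void)params; /* this sketch takes no params */
    my_pool_t *p = calloc(1, sizeof(*p));
    if (!p) {
        return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
    }
    p->provider = provider;
    *pool = p;
    return UMF_RESULT_SUCCESS;
}

/* ext_post_initialize(): deferred setup that must run after the pool
 * structure exists, e.g. resolving defaults that CTL may have overridden.
 * The 4096 value is a placeholder, not a UMF default. */
static umf_result_t my_pool_post_initialize(void *pool) {
    my_pool_t *p = pool;
    p->page_size = 4096;
    return UMF_RESULT_SUCCESS;
}

static umf_result_t my_pool_finalize(void *pool) {
    free(pool);
    return UMF_RESULT_SUCCESS;
}

/* Core allocation ops (malloc/free/...) are omitted for brevity; a real pool
 * must provide them. Extension ops left NULL are replaced with defaults by
 * umfPoolCreateInternal(), e.g. umfDefaultExtPostInitialize() shown above. */
static umf_memory_pool_ops_t MY_POOL_OPS = {
    .version = UMF_POOL_OPS_VERSION_CURRENT,
    .initialize = my_pool_initialize,
    .finalize = my_pool_finalize,
    .ext_post_initialize = my_pool_post_initialize,
};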