From ff95168f559db67c99724d3bcf6e5fed2d700573 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Plewa?=
Date: Thu, 20 Mar 2025 16:39:52 +0100
Subject: [PATCH] use multiple arenas in jemalloc pool

Fixes: #1076
---
 include/umf/pools/pool_jemalloc.h |  27 +++++
 src/libumf.def                    |   3 +
 src/libumf.map                    |   3 +
 src/pool/pool_jemalloc.c          | 169 ++++++++++++++++++++++++------
 src/utils/utils_common.h          |   3 +
 src/utils/utils_posix_common.c    |  15 ++-
 src/utils/utils_windows_common.c  |  15 ++-
 test/pools/jemalloc_pool.cpp      |  84 +++++++++++++--
 test/provider_file_memory_ipc.cpp |  36 ++++++-
 test/provider_os_memory.cpp       |  32 +++++-
 10 files changed, 331 insertions(+), 56 deletions(-)

diff --git a/include/umf/pools/pool_jemalloc.h b/include/umf/pools/pool_jemalloc.h
index 5974e6440a..6fae911c0c 100644
--- a/include/umf/pools/pool_jemalloc.h
+++ b/include/umf/pools/pool_jemalloc.h
@@ -16,6 +16,33 @@ extern "C" {
 
 #include <umf/memory_pool_ops.h>
 
+struct umf_jemalloc_pool_params_t;
+
+/// @brief handle to the optional parameters of the jemalloc pool.
+typedef struct umf_jemalloc_pool_params_t *umf_jemalloc_pool_params_handle_t;
+
+/// @brief Create an optional struct to store parameters of the jemalloc pool.
+/// @param hParams [out] handle to the newly created parameters struct.
+/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
+umf_result_t
+umfJemallocPoolParamsCreate(umf_jemalloc_pool_params_handle_t *hParams);
+
+/// @brief Destroy parameters struct.
+/// @param hParams handle to the parameters of the jemalloc pool.
+/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
+umf_result_t
+umfJemallocPoolParamsDestroy(umf_jemalloc_pool_params_handle_t hParams);
+
+/// @brief Customize the number of arenas created for this pool.
+///        Default is the number of CPU cores * 4.
+/// \details
+/// The number of arenas is limited by jemalloc; setting this value too high
+/// may reduce the number of pools available for creation.
+/// @param hParams handle to the parameters of the jemalloc pool.
+/// @param numArenas number of arenas.
+/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
+umf_result_t
+umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams,
+                                  size_t numArenas);
+
 umf_memory_pool_ops_t *umfJemallocPoolOps(void);
 
 #ifdef __cplusplus
diff --git a/src/libumf.def b/src/libumf.def
index 34ecee889a..38cad6069f 100644
--- a/src/libumf.def
+++ b/src/libumf.def
@@ -140,3 +140,6 @@ EXPORTS
     umfCtlExec
     umfCtlGet
     umfCtlSet
+    umfJemallocPoolParamsCreate
+    umfJemallocPoolParamsDestroy
+    umfJemallocPoolParamsSetNumArenas
diff --git a/src/libumf.map b/src/libumf.map
index f9ec9b6bfa..ae5638faf3 100644
--- a/src/libumf.map
+++ b/src/libumf.map
@@ -140,4 +140,7 @@ UMF_0.12 {
     umfCtlExec;
     umfCtlGet;
     umfCtlSet;
+    umfJemallocPoolParamsCreate;
+    umfJemallocPoolParamsDestroy;
+    umfJemallocPoolParamsSetNumArenas;
 } UMF_0.11;
diff --git a/src/pool/pool_jemalloc.c b/src/pool/pool_jemalloc.c
index 10e00dea51..80baf3a8f1 100644
--- a/src/pool/pool_jemalloc.c
+++ b/src/pool/pool_jemalloc.c
@@ -23,6 +23,25 @@
 #ifndef UMF_POOL_JEMALLOC_ENABLED
 
 umf_memory_pool_ops_t *umfJemallocPoolOps(void) { return NULL; }
+umf_result_t
+umfJemallocPoolParamsCreate(umf_jemalloc_pool_params_handle_t *hParams) {
+    (void)hParams; // unused
+    return UMF_RESULT_ERROR_NOT_SUPPORTED;
+}
+
+umf_result_t
+umfJemallocPoolParamsDestroy(umf_jemalloc_pool_params_handle_t hParams) {
+    (void)hParams; // unused
+    return UMF_RESULT_ERROR_NOT_SUPPORTED;
+}
+
+umf_result_t
+umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams,
+                                  size_t numArenas) {
+    (void)hParams;   // unused
+    (void)numArenas; // unused
+    return UMF_RESULT_ERROR_NOT_SUPPORTED;
+}
 
 #else
@@ -30,9 +49,14 @@ umf_memory_pool_ops_t *umfJemallocPoolOps(void) { return NULL; }
 
 #define MALLOCX_ARENA_MAX (MALLCTL_ARENAS_ALL - 1)
 
+typedef struct umf_jemalloc_pool_params_t {
+    size_t n_arenas;
+} umf_jemalloc_pool_params_t;
+
 typedef struct jemalloc_memory_pool_t {
     umf_memory_provider_handle_t provider;
-    unsigned int arena_index; // index of jemalloc arena
+    size_t n_arenas;
+    unsigned int arena_index[];
 } jemalloc_memory_pool_t;
 
 static __TLS umf_result_t TLS_last_allocation_error;
@@ -47,6 +71,14 @@ static jemalloc_memory_pool_t *get_pool_by_arena_index(unsigned arena_ind) {
     return pool_by_arena_index[arena_ind];
 }
 
+// SplitMix64 hash
+static uint64_t hash64(uint64_t x) {
+    x += 0x9e3779b97f4a7c15;
+    x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9;
+    x = (x ^ (x >> 27)) * 0x94d049bb133111eb;
+    return x ^ (x >> 31);
+}
+
 // arena_extent_alloc - an extent allocation function conforms to the extent_alloc_t type and upon
 // success returns a pointer to size bytes of mapped memory on behalf of arena arena_ind such that
 // the extent's base address is a multiple of alignment, as well as setting *zero to indicate
@@ -285,12 +317,22 @@ static extent_hooks_t arena_extent_hooks = {
     .merge = arena_extent_merge,
 };
 
+static unsigned get_arena_index(jemalloc_memory_pool_t *pool) {
+    static __TLS unsigned tid = 0;
+
+    if (tid == 0) {
+        tid = utils_gettid();
+    }
+
+    return pool->arena_index[hash64(tid) % pool->n_arenas];
+}
+
 static void *op_malloc(void *pool, size_t size) {
     assert(pool);
     jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
     // MALLOCX_TCACHE_NONE is set, because jemalloc can mix objects from different arenas inside
     // the tcache, so we wouldn't be able to guarantee isolation of different providers.
-    int flags = MALLOCX_ARENA(je_pool->arena_index) | MALLOCX_TCACHE_NONE;
+    int flags = MALLOCX_ARENA(get_arena_index(je_pool)) | MALLOCX_TCACHE_NONE;
     void *ptr = je_mallocx(size, flags);
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -343,7 +385,7 @@ static void *op_realloc(void *pool, void *ptr, size_t size) {
     jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
     // MALLOCX_TCACHE_NONE is set, because jemalloc can mix objects from different arenas inside
     // the tcache, so we wouldn't be able to guarantee isolation of different providers.
-    int flags = MALLOCX_ARENA(je_pool->arena_index) | MALLOCX_TCACHE_NONE;
+    int flags = MALLOCX_ARENA(get_arena_index(je_pool)) | MALLOCX_TCACHE_NONE;
     void *new_ptr = je_rallocx(ptr, size, flags);
     if (new_ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -364,7 +406,8 @@ static void *op_realloc(void *pool, void *ptr, size_t size) {
 static void *op_aligned_alloc(void *pool, size_t size, size_t alignment) {
     assert(pool);
     jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
-    unsigned arena = je_pool->arena_index;
+
+    unsigned arena = get_arena_index(je_pool);
     // MALLOCX_TCACHE_NONE is set, because jemalloc can mix objects from different arenas inside
     // the tcache, so we wouldn't be able to guarantee isolation of different providers.
     int flags =
@@ -382,51 +425,78 @@ static void *op_aligned_alloc(void *pool, size_t size, size_t alignment) {
 
 static umf_result_t op_initialize(umf_memory_provider_handle_t provider,
                                   void *params, void **out_pool) {
-    (void)params; // unused
     assert(provider);
     assert(out_pool);
 
     extent_hooks_t *pHooks = &arena_extent_hooks;
     size_t unsigned_size = sizeof(unsigned);
     int err;
+    umf_jemalloc_pool_params_t *jemalloc_params =
+        (umf_jemalloc_pool_params_t *)params;
 
-    jemalloc_memory_pool_t *pool =
-        umf_ba_global_alloc(sizeof(jemalloc_memory_pool_t));
-    if (!pool) {
-        return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
+    size_t n_arenas = 0;
+    if (jemalloc_params) {
+        n_arenas = jemalloc_params->n_arenas;
     }
-    pool->provider = provider;
-
-    unsigned arena_index;
-    err = je_mallctl("arenas.create", (void *)&arena_index, &unsigned_size,
-                     NULL, 0);
-    if (err) {
-        LOG_ERR("Could not create arena.");
-        goto err_free_pool;
+    if (n_arenas == 0) {
+        n_arenas = utils_get_num_cores() * 4;
     }
-
-    // setup extent_hooks for newly created arena
-    char cmd[64];
-    snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_index);
-    err = je_mallctl(cmd, NULL, NULL, (void *)&pHooks, sizeof(void *));
-    if (err) {
-        snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena_index);
-        (void)je_mallctl(cmd, NULL, 0, NULL, 0);
-        LOG_ERR("Could not setup extent_hooks for newly created arena.");
-        goto err_free_pool;
+    if (n_arenas > MALLOCX_ARENA_MAX) {
+        LOG_ERR("Number of arenas exceeds the limit.");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
     }
 
-    pool->arena_index = arena_index;
-    pool_by_arena_index[arena_index] = pool;
+    jemalloc_memory_pool_t *pool = umf_ba_global_alloc(
+        sizeof(*pool) + n_arenas * sizeof(*pool->arena_index));
+    if (!pool) {
+        return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
+    }
+    pool->provider = provider;
+    pool->n_arenas = n_arenas;
+
+    size_t num_created = 0;
+    for (size_t i = 0; i < n_arenas; i++) {
+        unsigned arena_index;
+        err = je_mallctl("arenas.create", (void *)&arena_index, &unsigned_size,
+                         NULL, 0);
+        if (err) {
+            LOG_ERR("Could not create arena.");
+            goto err_cleanup;
+        }
+
+        pool->arena_index[num_created++] = arena_index;
+        if (arena_index >= MALLOCX_ARENA_MAX) {
+            LOG_ERR("Number of arenas exceeds the limit.");
+            goto err_cleanup;
+        }
+
+        pool_by_arena_index[arena_index] = pool;
+
+        // Setup extent_hooks for the newly created arena.
+        char cmd[64];
+        snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_index);
+        err = je_mallctl(cmd, NULL, NULL, (void *)&pHooks, sizeof(void *));
+        if (err) {
+            LOG_ERR("Could not setup extent_hooks for newly created arena.");
+            goto err_cleanup;
+        }
+    }
 
     *out_pool = (umf_memory_pool_handle_t)pool;
 
     VALGRIND_DO_CREATE_MEMPOOL(pool, 0, 0);
 
     return UMF_RESULT_SUCCESS;
 
-err_free_pool:
+err_cleanup:
+    // Destroy any arenas that were successfully created.
+    for (size_t i = 0; i < num_created; i++) {
+        char cmd[64];
+        unsigned arena = pool->arena_index[i];
+        snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena);
+        (void)je_mallctl(cmd, NULL, 0, NULL, 0);
+    }
     umf_ba_global_free(pool);
     return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC;
 }
@@ -434,10 +504,12 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider,
 static void op_finalize(void *pool) {
     assert(pool);
     jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
-    char cmd[64];
-    snprintf(cmd, sizeof(cmd), "arena.%u.destroy", je_pool->arena_index);
-    (void)je_mallctl(cmd, NULL, 0, NULL, 0);
-    pool_by_arena_index[je_pool->arena_index] = NULL;
+    for (size_t i = 0; i < je_pool->n_arenas; i++) {
+        char cmd[64];
+        unsigned arena = je_pool->arena_index[i];
+        snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena);
+        (void)je_mallctl(cmd, NULL, 0, NULL, 0);
+    }
     umf_ba_global_free(je_pool);
 
     VALGRIND_DO_DESTROY_MEMPOOL(pool);
@@ -469,4 +541,33 @@ static umf_memory_pool_ops_t UMF_JEMALLOC_POOL_OPS = {
 umf_memory_pool_ops_t *umfJemallocPoolOps(void) {
     return &UMF_JEMALLOC_POOL_OPS;
 }
+
+umf_result_t
+umfJemallocPoolParamsCreate(umf_jemalloc_pool_params_handle_t *hParams) {
+    umf_jemalloc_pool_params_t *params = umf_ba_global_alloc(sizeof(*params));
+    if (!params) {
+        return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
+    }
+    memset(params, 0, sizeof(*params));
+    *hParams = params;
+    return UMF_RESULT_SUCCESS;
+}
+
+umf_result_t
+umfJemallocPoolParamsDestroy(umf_jemalloc_pool_params_handle_t hParams) {
+    umf_ba_global_free(hParams);
+    return UMF_RESULT_SUCCESS;
+}
+
+umf_result_t
+umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams,
+                                  size_t numArenas) {
+    if (!hParams) {
+        LOG_ERR("jemalloc pool params handle is NULL");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+    hParams->n_arenas = numArenas;
+    return UMF_RESULT_SUCCESS;
+}
+
 #endif /* UMF_POOL_JEMALLOC_ENABLED */
diff --git a/src/utils/utils_common.h b/src/utils/utils_common.h
index fff44f390c..0fa860a0e5 100644
--- a/src/utils/utils_common.h
+++ b/src/utils/utils_common.h
@@ -115,6 +115,9 @@ int utils_getpid(void);
 // get the current thread ID
 int utils_gettid(void);
 
+// get the number of CPU cores
+unsigned utils_get_num_cores(void);
+
 // close file descriptor
 int utils_close_fd(int fd);
diff --git a/src/utils/utils_posix_common.c b/src/utils/utils_posix_common.c
index 613b8ea41d..da051695b2 100644
--- a/src/utils/utils_posix_common.c
+++ b/src/utils/utils_posix_common.c
@@ -37,13 +37,17 @@
          ? LLONG_MAX                                                          \
          : (sizeof(off_t) == sizeof(long) ? LONG_MAX : INT_MAX))
 
-static UTIL_ONCE_FLAG Page_size_is_initialized = UTIL_ONCE_FLAG_INIT;
+static UTIL_ONCE_FLAG System_info_is_initialized = UTIL_ONCE_FLAG_INIT;
 static size_t Page_size;
+static unsigned Core_count;
 
-static void _utils_get_page_size(void) { Page_size = sysconf(_SC_PAGE_SIZE); }
+static void _utils_get_system_info(void) {
+    Page_size = sysconf(_SC_PAGE_SIZE);
+    Core_count = sysconf(_SC_NPROCESSORS_ONLN);
+}
 
 size_t utils_get_page_size(void) {
-    utils_init_once(&Page_size_is_initialized, _utils_get_page_size);
+    utils_init_once(&System_info_is_initialized, _utils_get_system_info);
     return Page_size;
 }
@@ -62,6 +66,11 @@ int utils_gettid(void) {
 #endif
 }
 
+unsigned utils_get_num_cores(void) {
+    utils_init_once(&System_info_is_initialized, _utils_get_system_info);
+    return Core_count;
+}
+
 int utils_close_fd(int fd) { return close(fd); }
 
 umf_result_t utils_errno_to_umf_result(int err) {
diff --git a/src/utils/utils_windows_common.c b/src/utils/utils_windows_common.c
index b6c5b0b4ee..7aa8f7684d 100644
--- a/src/utils/utils_windows_common.c
+++ b/src/utils/utils_windows_common.c
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
  *
  * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT.
  * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
@@ -22,20 +22,27 @@
 
 #define BUFFER_SIZE 1024
 
-static UTIL_ONCE_FLAG Page_size_is_initialized = UTIL_ONCE_FLAG_INIT;
+static UTIL_ONCE_FLAG System_info_is_initialized = UTIL_ONCE_FLAG_INIT;
 static size_t Page_size;
+static unsigned Core_count;
 
-static void _utils_get_page_size(void) {
+static void _utils_get_system_info(void) {
     SYSTEM_INFO SystemInfo;
     GetSystemInfo(&SystemInfo);
     Page_size = SystemInfo.dwPageSize;
+    Core_count = SystemInfo.dwNumberOfProcessors;
 }
 
 size_t utils_get_page_size(void) {
-    utils_init_once(&Page_size_is_initialized, _utils_get_page_size);
+    utils_init_once(&System_info_is_initialized, _utils_get_system_info);
     return Page_size;
 }
 
+unsigned utils_get_num_cores(void) {
+    utils_init_once(&System_info_is_initialized, _utils_get_system_info);
+    return Core_count;
+}
+
 int utils_getpid(void) { return GetCurrentProcessId(); }
 
 int utils_gettid(void) { return GetCurrentThreadId(); }
diff --git a/test/pools/jemalloc_pool.cpp b/test/pools/jemalloc_pool.cpp
index 8112f36bf4..69c4cf1a81 100644
--- a/test/pools/jemalloc_pool.cpp
+++ b/test/pools/jemalloc_pool.cpp
@@ -55,16 +55,39 @@ umf_result_t destroyFixedMemoryProviderParams(void *params) {
         (umf_fixed_memory_provider_params_handle_t)params);
 }
 
+template <size_t arenas = 0> void *createJemallocParams() {
+    umf_jemalloc_pool_params_handle_t params = nullptr;
+    auto ret = umfJemallocPoolParamsCreate(&params);
+    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
+    if constexpr (arenas != 0) {
+        ret = umfJemallocPoolParamsSetNumArenas(params, arenas);
+        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
+    }
+    return params;
+}
+
+umf_result_t destroyJemallocParams(void *params) {
+    return umfJemallocPoolParamsDestroy(
+        (umf_jemalloc_pool_params_handle_t)params);
+}
+
 INSTANTIATE_TEST_SUITE_P(
     jemallocPoolTest, umfPoolTest,
-    ::testing::Values(poolCreateExtParams{umfJemallocPoolOps(), nullptr,
-                                          nullptr, umfOsMemoryProviderOps(),
-                                          createOsMemoryProviderParams,
-                                          destroyOsMemoryProviderParams},
-                      poolCreateExtParams{umfJemallocPoolOps(), nullptr,
-                                          nullptr, umfFixedMemoryProviderOps(),
-                                          createFixedMemoryProviderParams,
-                                          destroyFixedMemoryProviderParams}));
+    ::testing::Values(
+        poolCreateExtParams{
+            umfJemallocPoolOps(), nullptr, nullptr, umfOsMemoryProviderOps(),
+            createOsMemoryProviderParams, destroyOsMemoryProviderParams},
+        poolCreateExtParams{
+            umfJemallocPoolOps(), nullptr, nullptr, umfFixedMemoryProviderOps(),
+            createFixedMemoryProviderParams, destroyFixedMemoryProviderParams},
+        poolCreateExtParams{umfJemallocPoolOps(), createJemallocParams,
+                            destroyJemallocParams, umfOsMemoryProviderOps(),
+                            createOsMemoryProviderParams,
+                            destroyOsMemoryProviderParams},
+        poolCreateExtParams{umfJemallocPoolOps(), createJemallocParams<1>,
+                            destroyJemallocParams, umfOsMemoryProviderOps(),
+                            createOsMemoryProviderParams,
+                            destroyOsMemoryProviderParams}));
 
 // this test makes sure that jemalloc does not use
 // memory provider to allocate metadata (and hence
@@ -119,3 +142,48 @@ TEST_F(test, metadataNotAllocatedUsingProvider) {
             [pool = pool.get()](void *ptr) { umfPoolFree(pool, ptr); });
     }
 }
+
+TEST_F(test, jemallocPoolNullParams) {
+    auto ret = umfJemallocPoolParamsSetNumArenas(NULL, 1);
+    EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+}
+
+TEST_F(test, jemallocPoolParams) {
+    umf_jemalloc_pool_params_handle_t params = nullptr;
+    auto ret = umfJemallocPoolParamsCreate(&params);
+    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
+
+    ret = umfJemallocPoolParamsSetNumArenas(params, 1);
+    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
+
+    ret = umfJemallocPoolParamsDestroy(params);
+    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
+}
+
+TEST_F(test, jemallocPoolParamsInvalid) {
+    umf_jemalloc_pool_params_handle_t params = nullptr;
+    auto ret = umfJemallocPoolParamsCreate(&params);
+    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
+
+    ret = umfJemallocPoolParamsSetNumArenas(params, SIZE_MAX);
+    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
+
+    umf_os_memory_provider_params_handle_t provider_params = nullptr;
+    ret = umfOsMemoryProviderParamsCreate(&provider_params);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    umf_memory_provider_handle_t provider;
+    ret = umfMemoryProviderCreate(umfOsMemoryProviderOps(), provider_params,
+                                  &provider);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+
+    umf_memory_pool_handle_t pool;
+    ret = umfPoolCreate(umfJemallocPoolOps(), provider, params, 0, &pool);
+    ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+
+    umfMemoryProviderDestroy(provider);
+
+    ret = umfJemallocPoolParamsDestroy(params);
+    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
+    ret = umfOsMemoryProviderParamsDestroy(provider_params);
+    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
+}
diff --git a/test/provider_file_memory_ipc.cpp b/test/provider_file_memory_ipc.cpp
index 90623a1793..0408b2fe23 100644
--- a/test/provider_file_memory_ipc.cpp
+++ b/test/provider_file_memory_ipc.cpp
@@ -67,6 +67,32 @@ umf_result_t destroyFileParamsFSDAX(void *params) {
         (umf_file_memory_provider_params_handle_t)params);
 }
 
+#ifdef UMF_POOL_JEMALLOC_ENABLED
+void *createJemallocParams() {
+    umf_jemalloc_pool_params_handle_t jemalloc_params = NULL;
+    umf_result_t res = umfJemallocPoolParamsCreate(&jemalloc_params);
+    if (res != UMF_RESULT_SUCCESS) {
+        throw std::runtime_error("Failed to create Jemalloc Pool params");
+    }
+
+    // This test creates multiple pools, so we need to reduce the number of arenas
+    // to avoid hitting the maximum arena limit on systems with many cores.
+    res = umfJemallocPoolParamsSetNumArenas(jemalloc_params, 1);
+    if (res != UMF_RESULT_SUCCESS) {
+        umfJemallocPoolParamsDestroy(jemalloc_params);
+        throw std::runtime_error("Failed to set number of arenas for Jemalloc "
+                                 "Pool params");
+    }
+    return jemalloc_params;
+}
+
+umf_result_t destroyJemallocParams(void *params) {
+    return umfJemallocPoolParamsDestroy(
+        (umf_jemalloc_pool_params_handle_t)params);
+}
+
+#endif
+
 HostMemoryAccessor hostAccessor;
 
 static std::vector<ipcTestParams> ipcManyPoolsTestParamsList = {
@@ -74,8 +100,9 @@ static std::vector<ipcTestParams> ipcManyPoolsTestParamsList = {
 //     {umfProxyPoolOps(), nullptr, umfFileMemoryProviderOps(),
 //      file_params_shared.get(), &hostAccessor},
 #ifdef UMF_POOL_JEMALLOC_ENABLED
-    {umfJemallocPoolOps(), nullptr, nullptr, umfFileMemoryProviderOps(),
-     createFileParamsShared, destroyFileParamsShared, &hostAccessor},
+    {umfJemallocPoolOps(), createJemallocParams, destroyJemallocParams,
+     umfFileMemoryProviderOps(), createFileParamsShared,
+     destroyFileParamsShared, &hostAccessor},
 #endif
 #ifdef UMF_POOL_SCALABLE_ENABLED
     {umfScalablePoolOps(), nullptr, nullptr, umfFileMemoryProviderOps(),
@@ -97,8 +124,9 @@ static std::vector<ipcTestParams> getIpcFsDaxTestParamsList(void) {
 //     {umfProxyPoolOps(), nullptr, umfFileMemoryProviderOps(),
 //      file_params_fsdax.get(), &hostAccessor},
 #ifdef UMF_POOL_JEMALLOC_ENABLED
-    {umfJemallocPoolOps(), nullptr, nullptr, umfFileMemoryProviderOps(),
-     createFileParamsFSDAX, destroyFileParamsFSDAX, &hostAccessor},
+    {umfJemallocPoolOps(), createJemallocParams, destroyJemallocParams,
+     umfFileMemoryProviderOps(), createFileParamsFSDAX,
+     destroyFileParamsFSDAX, &hostAccessor},
 #endif
 #ifdef UMF_POOL_SCALABLE_ENABLED
     {umfScalablePoolOps(), nullptr, nullptr, umfFileMemoryProviderOps(),
diff --git a/test/provider_os_memory.cpp b/test/provider_os_memory.cpp
index f3552b9236..11a546398b 100644
--- a/test/provider_os_memory.cpp
+++ b/test/provider_os_memory.cpp
@@ -461,14 +461,40 @@ umf_result_t destroyDisjointPoolParams(void *params) {
         static_cast<umf_disjoint_pool_params_handle_t>(params));
 }
 
+#ifdef UMF_POOL_JEMALLOC_ENABLED
+void *createJemallocParams() {
+    umf_jemalloc_pool_params_handle_t jemalloc_params = NULL;
+    umf_result_t res = umfJemallocPoolParamsCreate(&jemalloc_params);
+    if (res != UMF_RESULT_SUCCESS) {
+        throw std::runtime_error("Failed to create Jemalloc Pool params");
+    }
+
+    // This test creates multiple pools, so we need to reduce the number of arenas
+    // to avoid hitting the maximum arena limit on systems with many cores.
+    res = umfJemallocPoolParamsSetNumArenas(jemalloc_params, 1);
+    if (res != UMF_RESULT_SUCCESS) {
+        umfJemallocPoolParamsDestroy(jemalloc_params);
+        throw std::runtime_error("Failed to set number of arenas for Jemalloc "
+                                 "Pool params");
+    }
+    return jemalloc_params;
+}
+
+umf_result_t destroyJemallocParams(void *params) {
+    return umfJemallocPoolParamsDestroy(
+        (umf_jemalloc_pool_params_handle_t)params);
+}
+
+#endif
+
 static std::vector<ipcTestParams> ipcTestParamsList = {
     {umfDisjointPoolOps(), createDisjointPoolParams, destroyDisjointPoolParams,
      umfOsMemoryProviderOps(), createOsMemoryProviderParamsShared,
      destroyOsMemoryProviderParamsShared, &hostAccessor},
 #ifdef UMF_POOL_JEMALLOC_ENABLED
-    {umfJemallocPoolOps(), nullptr, nullptr, umfOsMemoryProviderOps(),
-     createOsMemoryProviderParamsShared, destroyOsMemoryProviderParamsShared,
-     &hostAccessor},
+    {umfJemallocPoolOps(), createJemallocParams, destroyJemallocParams,
+     umfOsMemoryProviderOps(), createOsMemoryProviderParamsShared,
+     destroyOsMemoryProviderParamsShared, &hostAccessor},
 #endif
 };
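
Usage sketch (not part of the patch): a minimal C program showing how the new
parameters API is meant to be driven end to end, mirroring the calls the tests
above make. It assumes the standard UMF headers and the generic
umfPoolMalloc/umfPoolFree/umfPoolDestroy helpers; error handling is abbreviated.

    // Create a jemalloc pool limited to 8 arenas instead of the default
    // (number of CPU cores * 4); jemalloc caps the total arena count, so a
    // smaller per-pool count leaves room for additional pools.
    #include <umf/memory_pool.h>
    #include <umf/memory_provider.h>
    #include <umf/pools/pool_jemalloc.h>
    #include <umf/providers/provider_os_memory.h>

    int main(void) {
        umf_os_memory_provider_params_handle_t provider_params = NULL;
        umf_memory_provider_handle_t provider = NULL;
        umf_jemalloc_pool_params_handle_t pool_params = NULL;
        umf_memory_pool_handle_t pool = NULL;

        if (umfOsMemoryProviderParamsCreate(&provider_params) !=
                UMF_RESULT_SUCCESS ||
            umfMemoryProviderCreate(umfOsMemoryProviderOps(), provider_params,
                                    &provider) != UMF_RESULT_SUCCESS) {
            return 1;
        }

        if (umfJemallocPoolParamsCreate(&pool_params) != UMF_RESULT_SUCCESS ||
            umfJemallocPoolParamsSetNumArenas(pool_params, 8) !=
                UMF_RESULT_SUCCESS ||
            umfPoolCreate(umfJemallocPoolOps(), provider, pool_params, 0,
                          &pool) != UMF_RESULT_SUCCESS) {
            return 1;
        }
        // op_initialize() copies n_arenas, so the params struct can be
        // destroyed as soon as the pool exists.
        umfJemallocPoolParamsDestroy(pool_params);

        void *ptr = umfPoolMalloc(pool, 64); // served by one of the 8 arenas
        umfPoolFree(pool, ptr);

        umfPoolDestroy(pool);
        umfMemoryProviderDestroy(provider);
        umfOsMemoryProviderParamsDestroy(provider_params);
        return 0;
    }

Each allocating thread is then mapped to one of the pool's arenas by
get_arena_index(), which hashes the cached thread ID with SplitMix64 and
reduces it modulo n_arenas, so concurrent threads mostly contend on
different arenas.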