diff --git a/c++/test/tfile.cpp b/c++/test/tfile.cpp index 6d7a5bb0a76..b38ff234b88 100644 --- a/c++/test/tfile.cpp +++ b/c++/test/tfile.cpp @@ -847,19 +847,6 @@ test_file_info() H5F_fspace_strategy_t out_strategy = H5F_FSPACE_STRATEGY_FSM_AGGR; try { - // Create a file using default properties. - H5File tempfile(FILE7, H5F_ACC_TRUNC); - - // Get the file's version information. - H5F_info2_t finfo; - tempfile.getFileInfo(finfo); - verify_val(static_cast(finfo.super.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); - verify_val(static_cast(finfo.free.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); - verify_val(static_cast(finfo.sohm.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); - - // Close the file. - tempfile.close(); - // Create file creation property list. FileCreatPropList fcpl; @@ -877,80 +864,101 @@ test_file_info() verify_val(out_fsp_psize, FSP_SIZE_DEF, "FileCreatPropList::getFileSpacePagesize", __LINE__, __FILE__); - // Set various file information. - fcpl.setUserblock(F2_USERBLOCK_SIZE); - fcpl.setSizes(F2_OFFSET_SIZE, F2_LENGTH_SIZE); - fcpl.setSymk(F2_SYM_INTERN_K, F2_SYM_LEAF_K); - fcpl.setIstorek(F2_ISTORE); + // Only test this for VFDs that operate on a single file + bool default_vfd_compatible = true; + h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &default_vfd_compatible); + + if (default_vfd_compatible) { + // Create a file using default properties. + H5File tempfile(FILE7, H5F_ACC_TRUNC); - hsize_t threshold = 5; // Free space section threshold to set - bool persist = true; // Persist free-space to set - H5F_fspace_strategy_t strategy = H5F_FSPACE_STRATEGY_PAGE; + // Get the file's version information. + H5F_info2_t finfo; + tempfile.getFileInfo(finfo); + verify_val(static_cast(finfo.super.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); + verify_val(static_cast(finfo.free.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); + verify_val(static_cast(finfo.sohm.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); - fcpl.setFileSpaceStrategy(strategy, persist, threshold); - fcpl.setFileSpacePagesize(FSP_SIZE512); + // Close the file. + tempfile.close(); - // Creating a file with the non-default file creation property list - // should create a version 1 superblock + // Set various file information. + fcpl.setUserblock(F2_USERBLOCK_SIZE); + fcpl.setSizes(F2_OFFSET_SIZE, F2_LENGTH_SIZE); + fcpl.setSymk(F2_SYM_INTERN_K, F2_SYM_LEAF_K); + fcpl.setIstorek(F2_ISTORE); - // Create file with custom file creation property list. - H5File file7(FILE7, H5F_ACC_TRUNC, fcpl); + hsize_t threshold = 5; // Free space section threshold to set + hbool_t persist = true; // Persist free-space to set + H5F_fspace_strategy_t strategy = H5F_FSPACE_STRATEGY_PAGE; - // Close the file creation property list. - fcpl.close(); + fcpl.setFileSpaceStrategy(strategy, persist, threshold); + fcpl.setFileSpacePagesize(FSP_SIZE512); - // Get the file's version information. - file7.getFileInfo(finfo); - verify_val(static_cast(finfo.super.version), 2, "H5File::getFileInfo", __LINE__, __FILE__); - verify_val(static_cast(finfo.free.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); - verify_val(static_cast(finfo.sohm.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); + // Creating a file with the non-default file creation property list + // should create a version 1 superblock - // Close the file. - file7.close(); + // Create file with custom file creation property list. + H5File file7(FILE7, H5F_ACC_TRUNC, fcpl); - // Re-open the file. 
- file7.openFile(FILE7, H5F_ACC_RDONLY); + // Close the file creation property list. + fcpl.close(); - // Get the file's creation property list. - FileCreatPropList fcpl2 = file7.getCreatePlist(); + // Get the file's version information. + file7.getFileInfo(finfo); + verify_val(static_cast(finfo.super.version), 2, "H5File::getFileInfo", __LINE__, __FILE__); + verify_val(static_cast(finfo.free.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); + verify_val(static_cast(finfo.sohm.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); - // Get the file's version information. - file7.getFileInfo(finfo); - verify_val(static_cast(finfo.super.version), 2, "H5File::getFileInfo", __LINE__, __FILE__); - verify_val(static_cast(finfo.free.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); - verify_val(static_cast(finfo.sohm.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); + // Close the file. + file7.close(); - // Retrieve the property values & check them. - hsize_t userblock = fcpl2.getUserblock(); - verify_val(userblock, F2_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__); + // Re-open the file. + file7.openFile(FILE7, H5F_ACC_RDONLY); - size_t off_size = 0, len_size = 0; - fcpl2.getSizes(off_size, len_size); - verify_val(off_size, F2_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__); - verify_val(len_size, F2_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__); + // Get the file's creation property list. + FileCreatPropList fcpl2 = file7.getCreatePlist(); - unsigned sym_ik = 0, sym_lk = 0; - fcpl2.getSymk(sym_ik, sym_lk); - verify_val(sym_ik, F2_SYM_INTERN_K, "FileCreatPropList::getSymk", __LINE__, __FILE__); - verify_val(sym_lk, F2_SYM_LEAF_K, "FileCreatPropList::getSymk", __LINE__, __FILE__); + // Get the file's version information. + file7.getFileInfo(finfo); + verify_val(static_cast(finfo.super.version), 2, "H5File::getFileInfo", __LINE__, __FILE__); + verify_val(static_cast(finfo.free.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); + verify_val(static_cast(finfo.sohm.version), 0, "H5File::getFileInfo", __LINE__, __FILE__); - unsigned istore_ik = fcpl2.getIstorek(); - verify_val(istore_ik, F2_ISTORE, "FileCreatPropList::getIstorek", __LINE__, __FILE__); + // Retrieve the property values & check them. 
+ hsize_t userblock = fcpl2.getUserblock(); + verify_val(userblock, F2_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__); - /* ret=H5Pget_shared_mesg_nindexes(fcpl2,&nindexes); - CHECK(ret, FAIL, "H5Pget_shared_mesg_nindexes"); - VERIFY(nindexes, MISC11_NINDEXES, "H5Pget_shared_mesg_nindexes"); - */ + size_t off_size = 0, len_size = 0; + fcpl2.getSizes(off_size, len_size); + verify_val(off_size, F2_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__); + verify_val(len_size, F2_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__); - // Get and verify the file space info from the creation property list */ - fcpl2.getFileSpaceStrategy(out_strategy, out_persist, out_threshold); - verify_val(static_cast(out_strategy), static_cast(strategy), - "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__); - verify_val(out_persist, persist, "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__); - verify_val(out_threshold, threshold, "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__); + unsigned sym_ik = 0, sym_lk = 0; + fcpl2.getSymk(sym_ik, sym_lk); + verify_val(sym_ik, F2_SYM_INTERN_K, "FileCreatPropList::getSymk", __LINE__, __FILE__); + verify_val(sym_lk, F2_SYM_LEAF_K, "FileCreatPropList::getSymk", __LINE__, __FILE__); - out_fsp_psize = fcpl2.getFileSpacePagesize(); - verify_val(out_fsp_psize, FSP_SIZE512, "FileCreatPropList::getFileSpacePagesize", __LINE__, __FILE__); + unsigned istore_ik = fcpl2.getIstorek(); + verify_val(istore_ik, F2_ISTORE, "FileCreatPropList::getIstorek", __LINE__, __FILE__); + + /* ret=H5Pget_shared_mesg_nindexes(fcpl2,&nindexes); + CHECK(ret, FAIL, "H5Pget_shared_mesg_nindexes"); + VERIFY(nindexes, MISC11_NINDEXES, "H5Pget_shared_mesg_nindexes"); + */ + + // Get and verify the file space info from the creation property list */ + fcpl2.getFileSpaceStrategy(out_strategy, out_persist, out_threshold); + verify_val(static_cast(out_strategy), static_cast(strategy), + "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__); + verify_val(out_persist, persist, "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__); + verify_val(out_threshold, threshold, "FileCreatPropList::getFileSpaceStrategy", __LINE__, + __FILE__); + + out_fsp_psize = fcpl2.getFileSpacePagesize(); + verify_val(out_fsp_psize, FSP_SIZE512, "FileCreatPropList::getFileSpacePagesize", __LINE__, + __FILE__); + } PASSED(); } // end of try block diff --git a/c++/test/th5s.cpp b/c++/test/th5s.cpp index 9276a7c330e..2b46d58080f 100644 --- a/c++/test/th5s.cpp +++ b/c++/test/th5s.cpp @@ -153,37 +153,40 @@ test_h5s_basic() { } // do nothing, exception expected - /* - * Try reading a file that has been prepared that has a dataset with a - * higher dimensionality than what the library can handle. - * - * If this test fails and the H5S_MAX_RANK variable has changed, follow - * the instructions in space_overflow.c for regenating the th5s.h5 file. - */ - char *tmp_str = new char[TESTFILE.length() + 1]; - strcpy(tmp_str, TESTFILE.c_str()); - const char *testfile = H5_get_srcdir_filename(tmp_str); - delete[] tmp_str; - - // Create file - H5File fid1(testfile, H5F_ACC_RDONLY); - - // Try to open the dataset that has higher dimensionality than - // what the library can handle and this operation should fail. 
- try { - DataSet dset1 = fid1.openDataSet("dset"); - - // Should FAIL but didn't, so throw an invalid action exception - throw InvalidActionException( - "H5File::openDataSet", - "Opening a dataset with higher dimensionality than what the library can handle"); + // Only test this for VFDs that operate on a single file + bool default_vfd_compatible = true; + h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &default_vfd_compatible); + + if (default_vfd_compatible) { + /* + * Try reading a file that has been prepared that has a dataset with a + * higher dimensionality than what the library can handle. + * + * If this test fails and the H5S_MAX_RANK variable has changed, follow + * the instructions in space_overflow.c for regenating the th5s.h5 file. + */ + char *tmp_str = new char[TESTFILE.length() + 1]; + strcpy(tmp_str, TESTFILE.c_str()); + const char *testfile = H5_get_srcdir_filename(tmp_str); + delete[] tmp_str; + + // Create file + H5File fid1(testfile, H5F_ACC_RDONLY); + + // Try to open the dataset that has higher dimensionality than + // what the library can handle and this operation should fail. + try { + DataSet dset1 = fid1.openDataSet("dset"); + + // Should FAIL but didn't, so throw an invalid action exception + throw InvalidActionException( + "H5File::openDataSet", + "Opening a dataset with higher dimensionality than what the library can handle"); + } + catch (FileIException &E) // catching higher dimensionality dataset + { + } // do nothing, exception expected } - catch (FileIException &E) // catching higher dimensionality dataset - { - } // do nothing, exception expected - - // CHECK_I(ret, "H5Fclose"); // leave this here, later, fake a failure - // in the p_close see how this will handle it. - BMR // When running in valgrind, this PASSED macro will be missed PASSED(); diff --git a/config/cmake/HDF5DeveloperBuild.cmake b/config/cmake/HDF5DeveloperBuild.cmake index 87fd0302d47..a9ef99e2afe 100644 --- a/config/cmake/HDF5DeveloperBuild.cmake +++ b/config/cmake/HDF5DeveloperBuild.cmake @@ -161,11 +161,10 @@ if (HDF5_ENABLE_DEBUG_H5TS) list (APPEND HDF5_DEBUG_APIS H5TS_DEBUG) endif () -# If HDF5 free list debugging wasn't specifically enabled, disable -# free lists entirely for developer build modes, as they can -# make certain types of issues (like references to stale pointers) -# much more difficult to debug -if (NOT HDF5_ENABLE_DEBUG_H5FL) +# Option to control internal free list use. Enabled by default +option (HDF5_ENABLE_FREE_LISTS "Enable memory free lists" ON) +mark_as_advanced (HDF5_ENABLE_FREE_LISTS) +if (NOT HDF5_ENABLE_FREE_LISTS) list (APPEND HDF5_DEVELOPER_DEFS H5_NO_FREE_LISTS) endif () diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index cf71890c450..c8408e337ec 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -869,6 +869,7 @@ set (H5TS_SOURCES ${HDF5_SRC_DIR}/H5TSbarrier.c ${HDF5_SRC_DIR}/H5TSc11.c ${HDF5_SRC_DIR}/H5TScond.c + ${HDF5_SRC_DIR}/H5TSdlftt_mutex.c ${HDF5_SRC_DIR}/H5TSint.c ${HDF5_SRC_DIR}/H5TSkey.c ${HDF5_SRC_DIR}/H5TSmutex.c diff --git a/src/H5.c b/src/H5.c index 839edf76b55..761364a8009 100644 --- a/src/H5.c +++ b/src/H5.c @@ -257,6 +257,10 @@ H5_init_library(void) * default FAPL. 
* */ +#ifdef H5_HAVE_CONCURRENCY + if (H5FL_init() < 0) + HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, FAIL, "unable to initialize free list interface"); +#endif if (H5E_init() < 0) HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, FAIL, "unable to initialize error interface"); if (H5FD_init() < 0) diff --git a/src/H5Cimage.c b/src/H5Cimage.c index ded37100bb2..b62e32ce319 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -130,7 +130,7 @@ static void H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_ static herr_t H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr); static herr_t H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr); static herr_t H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr); -static H5C_cache_entry_t *H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, hsize_t *buf_size, +static H5C_cache_entry_t *H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, hsize_t buf_size, const uint8_t **buf); static herr_t H5C__write_cache_image_superblock_msg(H5F_t *f, bool create); static herr_t H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr); @@ -2376,7 +2376,6 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) { H5C_cache_entry_t *pf_entry_ptr; /* Pointer to prefetched entry */ H5C_cache_entry_t *parent_ptr; /* Pointer to parent of prefetched entry */ - hsize_t image_len; /* Image length */ const uint8_t *p; /* Pointer into image buffer */ unsigned u, v; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -2392,11 +2391,10 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) assert(cache_ptr->image_len > 0); /* Decode metadata cache image header */ - p = (uint8_t *)cache_ptr->image_buffer; - image_len = cache_ptr->image_len; - if (H5C__decode_cache_image_header(f, cache_ptr, &p, image_len + 1) < 0) + p = (uint8_t *)cache_ptr->image_buffer; + if (H5C__decode_cache_image_header(f, cache_ptr, &p, cache_ptr->image_len + 1) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTDECODE, FAIL, "cache image header decode failed"); - assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < image_len); + assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_len); /* The image_data_len and # of entries should be defined now */ assert(cache_ptr->image_data_len > 0); @@ -2408,7 +2406,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) /* Create the prefetched entry described by the ith * entry in cache_ptr->image_entrise. 
*/ - if (NULL == (pf_entry_ptr = H5C__reconstruct_cache_entry(f, cache_ptr, &image_len, &p))) + if (NULL == (pf_entry_ptr = H5C__reconstruct_cache_entry(f, cache_ptr, cache_ptr->image_len, &p))) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "reconstruction of cache entry failed"); /* Note that we make no checks on available cache space before @@ -2564,7 +2562,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) *------------------------------------------------------------------------- */ static H5C_cache_entry_t * -H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, hsize_t *buf_size, const uint8_t **buf) +H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, hsize_t buf_size, const uint8_t **buf) { H5C_cache_entry_t *pf_entry_ptr = NULL; /* Reconstructed cache entry */ uint8_t flags = 0; @@ -2577,8 +2575,8 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, hsize_t *buf_size #endif bool file_is_rw; const uint8_t *p; - const uint8_t *p_end = *buf + *buf_size - 1; /* Pointer to last valid byte in buffer */ - H5C_cache_entry_t *ret_value = NULL; /* Return value */ + const uint8_t *p_end = *buf + buf_size - 1; /* Pointer to last valid byte in buffer */ + H5C_cache_entry_t *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE @@ -2746,8 +2744,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, hsize_t *buf_size /* Sanity checks */ assert(pf_entry_ptr->size > 0 && pf_entry_ptr->size < H5C_MAX_ENTRY_SIZE); - /* Update buffer pointer and buffer len */ - *buf_size -= (hsize_t)(p - *buf); + /* Update buffer pointer */ *buf = p; ret_value = pf_entry_ptr; diff --git a/src/H5FL.c b/src/H5FL.c index 087017e03e9..575bab14053 100644 --- a/src/H5FL.c +++ b/src/H5FL.c @@ -21,32 +21,80 @@ * move frequently accessed free lists to the head of the queue. 
*/ -#include "H5FLmodule.h" /* This source code file is part of the H5FL module */ +/****************/ +/* Module Setup */ +/****************/ -/* #define H5FL_DEBUG */ +#include "H5FLmodule.h" /* This source code file is part of the H5FL module */ +/***********/ +/* Headers */ +/***********/ #include "H5private.h" /* Generic Functions */ #include "H5Eprivate.h" /* Error handling */ -#include "H5FLprivate.h" /* Free Lists */ +#include "H5FLprivate.h" /* Free Lists */ #include "H5MMprivate.h" /* Memory management */ +#include "H5TSprivate.h" /* Threadsafety */ -/* - * Private type definitions - */ +/****************/ +/* Local Macros */ +/****************/ + +/* #define H5FL_DEBUG */ + +#define H5FL_REG_GLB_MEM_LIM (1 * 1024 * 1024) /* Default to 1MB limit on all regular free lists */ +#define H5FL_REG_LST_MEM_LIM (1 * 65536) /* Default to 64KB limit on each regular free list */ +#define H5FL_ARR_GLB_MEM_LIM (4 * 1024 * 1024) /* Default to 4MB limit on all array free lists */ +#define H5FL_ARR_LST_MEM_LIM (4 * 65536) /* Default to 256KB limit on each array free list */ +#define H5FL_BLK_GLB_MEM_LIM (16 * 1024 * 1024) /* Default to 16MB limit on all block free lists */ +#define H5FL_BLK_LST_MEM_LIM (1024 * 1024) /* Default to 1024KB (1MB) limit on each block free list */ +#define H5FL_FAC_GLB_MEM_LIM (16 * 1024 * 1024) /* Default to 16MB limit on all factory free lists */ +#define H5FL_FAC_LST_MEM_LIM (1024 * 1024) /* Default to 1024KB (1MB) limit on each factory free list */ + +/******************/ +/* Local Typedefs */ +/******************/ + +/********************/ +/* Local Prototypes */ +/********************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/*********************/ +/* Package Variables */ +/*********************/ + +/*******************/ +/* Local Variables */ +/*******************/ /* - Default limits on how much memory can accumulate on each free list before - it is garbage collected. + * Default limits on how much memory can accumulate on each free list before + * it is garbage collected. 
*/ -static size_t H5FL_reg_glb_mem_lim = 1 * 1024 * 1024; /* Default to 1MB limit on all regular free lists */ -static size_t H5FL_reg_lst_mem_lim = 1 * 65536; /* Default to 64KB limit on each regular free list */ -static size_t H5FL_arr_glb_mem_lim = 4 * 1024 * 1024; /* Default to 4MB limit on all array free lists */ -static size_t H5FL_arr_lst_mem_lim = 4 * 65536; /* Default to 256KB limit on each array free list */ -static size_t H5FL_blk_glb_mem_lim = 16 * 1024 * 1024; /* Default to 16MB limit on all block free lists */ -static size_t H5FL_blk_lst_mem_lim = 1024 * 1024; /* Default to 1024KB (1MB) limit on each block free list */ -static size_t H5FL_fac_glb_mem_lim = 16 * 1024 * 1024; /* Default to 16MB limit on all factory free lists */ -static size_t H5FL_fac_lst_mem_lim = - 1024 * 1024; /* Default to 1024KB (1MB) limit on each factory free list */ +#ifdef H5_HAVE_CONCURRENCY +static bool H5TS_limits_init = false; +static H5TS_atomic_size_t H5FL_reg_glb_mem_lim; +static H5TS_atomic_size_t H5FL_reg_lst_mem_lim; +static H5TS_atomic_size_t H5FL_arr_glb_mem_lim; +static H5TS_atomic_size_t H5FL_arr_lst_mem_lim; +static H5TS_atomic_size_t H5FL_blk_glb_mem_lim; +static H5TS_atomic_size_t H5FL_blk_lst_mem_lim; +static H5TS_atomic_size_t H5FL_fac_glb_mem_lim; +static H5TS_atomic_size_t H5FL_fac_lst_mem_lim; +#else /* H5_HAVE_CONCURRENCY */ +static size_t H5FL_reg_glb_mem_lim = H5FL_REG_GLB_MEM_LIM; +static size_t H5FL_reg_lst_mem_lim = H5FL_REG_LST_MEM_LIM; +static size_t H5FL_arr_glb_mem_lim = H5FL_ARR_GLB_MEM_LIM; +static size_t H5FL_arr_lst_mem_lim = H5FL_ARR_LST_MEM_LIM; +static size_t H5FL_blk_glb_mem_lim = H5FL_BLK_GLB_MEM_LIM; +static size_t H5FL_blk_lst_mem_lim = H5FL_BLK_LST_MEM_LIM; +static size_t H5FL_fac_glb_mem_lim = H5FL_FAC_GLB_MEM_LIM; +static size_t H5FL_fac_lst_mem_lim = H5FL_FAC_LST_MEM_LIM; +#endif /* H5_HAVE_CONCURRENCY */ /* A garbage collection node for regular free lists */ typedef struct H5FL_reg_gc_node_t { @@ -56,12 +104,18 @@ typedef struct H5FL_reg_gc_node_t { /* The garbage collection head for regular free lists */ typedef struct H5FL_reg_gc_list_t { - size_t mem_freed; /* Amount of free memory on list */ +#ifdef H5_HAVE_CONCURRENCY + bool init; /* Whether the mutex has been initialized */ + H5TS_dlftt_mutex_t mutex; /* Guard access to the list of free lists */ + H5TS_atomic_size_t mem_freed; /* Amount of free memory on list */ +#else /* H5_HAVE_CONCURRENCY */ + size_t mem_freed; /* Amount of free memory on list */ +#endif /* H5_HAVE_CONCURRENCY */ struct H5FL_reg_gc_node_t *first; /* Pointer to the first node in the list of things to garbage collect */ } H5FL_reg_gc_list_t; /* The head of the list of things to garbage collect */ -static H5FL_reg_gc_list_t H5FL_reg_gc_head = {0, NULL}; +static H5FL_reg_gc_list_t H5FL_reg_gc_head; /* A garbage collection node for array free lists */ typedef struct H5FL_gc_arr_node_t { @@ -71,12 +125,18 @@ typedef struct H5FL_gc_arr_node_t { /* The garbage collection head for array free lists */ typedef struct H5FL_gc_arr_list_t { - size_t mem_freed; /* Amount of free memory on list */ +#ifdef H5_HAVE_CONCURRENCY + bool init; /* Whether the mutex has been initialized */ + H5TS_dlftt_mutex_t mutex; /* Guard access to the list of free lists */ + H5TS_atomic_size_t mem_freed; /* Amount of free memory on list */ +#else /* H5_HAVE_CONCURRENCY */ + size_t mem_freed; /* Amount of free memory on list */ +#endif /* H5_HAVE_CONCURRENCY */ struct H5FL_gc_arr_node_t *first; /* Pointer to the first node in the list of things to garbage collect 
*/ } H5FL_gc_arr_list_t; /* The head of the list of array things to garbage collect */ -static H5FL_gc_arr_list_t H5FL_arr_gc_head = {0, NULL}; +static H5FL_gc_arr_list_t H5FL_arr_gc_head; /* A garbage collection node for blocks */ typedef struct H5FL_blk_gc_node_t { @@ -86,12 +146,18 @@ typedef struct H5FL_blk_gc_node_t { /* The garbage collection head for blocks */ typedef struct H5FL_blk_gc_list_t { - size_t mem_freed; /* Amount of free memory on list */ +#ifdef H5_HAVE_CONCURRENCY + bool init; /* Whether the mutex has been initialized */ + H5TS_dlftt_mutex_t mutex; /* Guard access to the list of free lists */ + H5TS_atomic_size_t mem_freed; /* Amount of free memory on list */ +#else /* H5_HAVE_CONCURRENCY */ + size_t mem_freed; /* Amount of free memory on list */ +#endif /* H5_HAVE_CONCURRENCY */ struct H5FL_blk_gc_node_t *first; /* Pointer to the first node in the list of things to garbage collect */ } H5FL_blk_gc_list_t; /* The head of the list of PQs to garbage collect */ -static H5FL_blk_gc_list_t H5FL_blk_gc_head = {0, NULL}; +static H5FL_blk_gc_list_t H5FL_blk_gc_head; /* A garbage collection node for factory free lists */ struct H5FL_fac_gc_node_t { @@ -101,7 +167,14 @@ struct H5FL_fac_gc_node_t { /* The garbage collection head for factory free lists */ typedef struct H5FL_fac_gc_list_t { - size_t mem_freed; /* Amount of free memory on list */ +#ifdef H5_HAVE_CONCURRENCY + bool init; /* Whether the mutex has been initialized */ + H5TS_dlftt_mutex_t mutex; /* Guard access to this free list */ + H5TS_atomic_size_t mem_freed; /* Amount of free memory on list */ +#else /* H5_HAVE_CONCURRENCY */ + size_t mem_freed; /* Amount of free memory on list */ +#endif /* H5_HAVE_CONCURRENCY */ + struct H5FL_fac_gc_node_t *first; /* Pointer to the first node in the list of things to garbage collect */ } H5FL_fac_gc_list_t; @@ -114,7 +187,7 @@ struct H5FL_fac_node_t { bool H5_PKG_INIT_VAR = false; /* The head of the list of factory things to garbage collect */ -static H5FL_fac_gc_list_t H5FL_fac_gc_head = {0, NULL}; +static H5FL_fac_gc_list_t H5FL_fac_gc_head; /* Forward declarations of local static functions */ static void *H5FL__malloc(size_t mem_size); @@ -145,6 +218,68 @@ H5FL_DEFINE_STATIC(H5FL_fac_gc_node_t); /* Declare a free list to manage the H5FL_fac_head_t struct */ H5FL_DEFINE(H5FL_fac_head_t); +#ifdef H5_HAVE_CONCURRENCY +/*------------------------------------------------------------------------- + * Function: H5FL_init + * + * Purpose: Initialize the interface from some other layer. 
+ * + * Return: Success: non-negative + * Failure: negative + *------------------------------------------------------------------------- + */ +herr_t +H5FL_init(void) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Initialize the global & per-list limit atomic variables */ + assert(!H5TS_limits_init); + H5TS_atomic_init_size_t(&H5FL_reg_glb_mem_lim, H5FL_REG_GLB_MEM_LIM); + H5TS_atomic_init_size_t(&H5FL_reg_lst_mem_lim, H5FL_REG_LST_MEM_LIM); + H5TS_atomic_init_size_t(&H5FL_arr_glb_mem_lim, H5FL_ARR_GLB_MEM_LIM); + H5TS_atomic_init_size_t(&H5FL_arr_lst_mem_lim, H5FL_ARR_LST_MEM_LIM); + H5TS_atomic_init_size_t(&H5FL_blk_glb_mem_lim, H5FL_BLK_GLB_MEM_LIM); + H5TS_atomic_init_size_t(&H5FL_blk_lst_mem_lim, H5FL_BLK_LST_MEM_LIM); + H5TS_atomic_init_size_t(&H5FL_fac_glb_mem_lim, H5FL_FAC_GLB_MEM_LIM); + H5TS_atomic_init_size_t(&H5FL_fac_lst_mem_lim, H5FL_FAC_LST_MEM_LIM); + H5TS_limits_init = true; + + /* Initialize the 'reg' list of lists */ + assert(!H5FL_reg_gc_head.init); + if (H5TS_dlftt_mutex_init(&H5FL_reg_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize list of free lists' mutex"); + H5TS_atomic_init_size_t(&H5FL_reg_gc_head.mem_freed, 0); + H5FL_reg_gc_head.init = true; + + /* Initialize the 'blk' list of lists */ + assert(!H5FL_blk_gc_head.init); + if (H5TS_dlftt_mutex_init(&H5FL_blk_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize list of free lists' mutex"); + H5TS_atomic_init_size_t(&H5FL_blk_gc_head.mem_freed, 0); + H5FL_blk_gc_head.init = true; + + /* Initialize the 'arr' list of lists */ + assert(!H5FL_arr_gc_head.init); + if (H5TS_dlftt_mutex_init(&H5FL_arr_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize list of free lists' mutex"); + H5TS_atomic_init_size_t(&H5FL_arr_gc_head.mem_freed, 0); + H5FL_arr_gc_head.init = true; + + /* Initialize the 'fac' list of lists */ + assert(!H5FL_fac_gc_head.init); + if (H5TS_dlftt_mutex_init(&H5FL_fac_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize list of free lists' mutex"); + H5TS_atomic_init_size_t(&H5FL_fac_gc_head.mem_freed, 0); + H5FL_fac_gc_head.init = true; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FL_init() */ +#endif /* H5_HAVE_CONCURRENCY */ + /*-------------------------------------------------------------------------- NAME H5FL_term_package @@ -186,6 +321,23 @@ H5FL_term_package(void) H5_PKG_INIT_VAR = false; } /* end if */ +#ifdef H5_HAVE_CONCURRENCY + /* Shut down the limits */ + if (H5TS_limits_init) { + H5TS_atomic_destroy_size_t(&H5FL_reg_glb_mem_lim); + H5TS_atomic_destroy_size_t(&H5FL_reg_lst_mem_lim); + H5TS_atomic_destroy_size_t(&H5FL_arr_glb_mem_lim); + H5TS_atomic_destroy_size_t(&H5FL_arr_lst_mem_lim); + H5TS_atomic_destroy_size_t(&H5FL_blk_glb_mem_lim); + H5TS_atomic_destroy_size_t(&H5FL_blk_lst_mem_lim); + H5TS_atomic_destroy_size_t(&H5FL_fac_glb_mem_lim); + H5TS_atomic_destroy_size_t(&H5FL_fac_lst_mem_lim); + H5TS_limits_init = false; + + n++; + } +#endif /* H5_HAVE_CONCURRENCY */ + FUNC_LEAVE_NOAPI(n) } /* end H5FL_term_package() */ @@ -248,17 +400,32 @@ H5FL__reg_init(H5FL_reg_head_t *head) /* Initialize the new garbage collection node */ new_node->list = head; - /* Link in to the garbage collection list */ - new_node->next = H5FL_reg_gc_head.first; - H5FL_reg_gc_head.first = new_node; - - /* Indicate that the free list is initialized */ - head->init = true; +#ifdef H5_HAVE_CONCURRENCY + /* Initialize the mutex protecting 
this specific list */ + if (H5TS_dlftt_mutex_init(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Make certain that the space allocated is large enough to store a free list pointer (eventually) */ if (head->size < sizeof(H5FL_reg_node_t)) head->size = sizeof(H5FL_reg_node_t); +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_reg_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Link in to the garbage collection list */ + new_node->next = H5FL_reg_gc_head.first; + H5FL_reg_gc_head.first = new_node; + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_reg_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5FL__reg_init() */ @@ -275,7 +442,8 @@ H5FL__reg_init(H5FL_reg_head_t *head) void * H5FL_reg_free(H5FL_reg_head_t *head, void *obj) { - void *ret_value = NULL; /* Return value */ + unsigned onlist; /* Number of blocks on free list */ + void *ret_value = NULL; /* Return value */ /* NOINIT OK here because this must be called after H5FL_reg_malloc/calloc * -NAF */ @@ -285,13 +453,19 @@ H5FL_reg_free(H5FL_reg_head_t *head, void *obj) assert(head); assert(obj); + /* Make certain that the free list is initialized */ + assert(H5_GLOBAL_IS_INIT(head)); + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + #ifdef H5FL_DEBUG memset(obj, 255, head->size); #endif /* H5FL_DEBUG */ - /* Make certain that the free list is initialized */ - assert(head->init); - /* Link into the free list */ ((H5FL_reg_node_t *)obj)->next = head->list; @@ -299,19 +473,29 @@ H5FL_reg_free(H5FL_reg_head_t *head, void *obj) head->list = (H5FL_reg_node_t *)obj; /* Increment the number of blocks on free list */ - head->onlist++; + onlist = ++head->onlist; + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Increment the amount of "regular" freed memory globally */ - H5FL_reg_gc_head.mem_freed += head->size; + H5TS_ATOMIC_FETCH_ADD_SIZE_T(&H5FL_reg_gc_head.mem_freed, head->size); /* Check for exceeding free list memory use limits */ /* First check this particular list */ - if (head->onlist * head->size > H5FL_reg_lst_mem_lim) + if (onlist * head->size > H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_reg_lst_mem_lim)) + /* It's possible that 2+ threads could race and garbage collect, but */ + /* that's OK, on the rare occasions it happens */ if (H5FL__reg_gc_list(head) < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, NULL, "garbage collection failed during free"); /* Then check the global amount memory on regular free lists */ - if (H5FL_reg_gc_head.mem_freed > H5FL_reg_glb_mem_lim) + if (H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_reg_gc_head.mem_freed) > H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_reg_glb_mem_lim)) + /* It's possible that 2+ threads could race and garbage collect, but */ + /* that's OK, on the rare occasions it happens */ if (H5FL__reg_gc() 
< 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, NULL, "garbage collection failed during free"); @@ -340,9 +524,14 @@ H5FL_reg_malloc(H5FL_reg_head_t *head) assert(head); /* Make certain the list is initialized first */ - if (!head->init) - if (H5FL__reg_init(head) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, NULL, "can't initialize 'regular' blocks"); + H5_GLOBAL_INIT(head, H5FL__reg_init, H5E_RESOURCE, H5E_CANTINIT, NULL, + "can't initialize 'regular' blocks"); + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Check for nodes available on the free list first */ if (head->list != NULL) { @@ -355,17 +544,42 @@ H5FL_reg_malloc(H5FL_reg_head_t *head) /* Decrement the number of blocks & memory on free list */ head->onlist--; +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* Decrement the amount of global "regular" free list memory in use */ - H5FL_reg_gc_head.mem_freed -= (head->size); + H5TS_ATOMIC_FETCH_SUB_SIZE_T(&H5FL_reg_gc_head.mem_freed, head->size); } /* end if */ /* Otherwise allocate a node */ else { +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Allocate new memory */ if (NULL == (ret_value = H5FL__malloc(head->size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* Increment the number of blocks allocated in list */ head->allocated++; - } /* end else */ + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + } /* end else */ done: FUNC_LEAVE_NOAPI(ret_value) @@ -415,9 +629,24 @@ H5FL_reg_calloc(H5FL_reg_head_t *head) static herr_t H5FL__reg_gc_list(H5FL_reg_head_t *head) { - H5FL_reg_node_t *free_list; /* Pointer to nodes in free list being garbage collected */ + H5FL_reg_node_t *free_list; /* Pointer to nodes in free list being garbage collected */ + unsigned onlist; /* Number of blocks on free list */ + herr_t ret_value = SUCCEED; /* Return value*/ +#ifdef H5_HAVE_CONCURRENCY + FUNC_ENTER_PACKAGE +#else /* H5_HAVE_CONCURRENCY */ FUNC_ENTER_PACKAGE_NOERR +#endif /* H5_HAVE_CONCURRENCY */ + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Save for later */ + onlist = head->onlist; /* For each free list being garbage collected, walk through the nodes and free them */ free_list = head->list; @@ -437,14 +666,23 @@ H5FL__reg_gc_list(H5FL_reg_head_t *head) /* Decrement the count of nodes allocated and free the node */ head->allocated -= head->onlist; - /* Decrement global count of 
free memory on "regular" lists */ - H5FL_reg_gc_head.mem_freed -= (head->onlist * head->size); - /* Indicate no free nodes on the free list */ head->list = NULL; head->onlist = 0; - FUNC_LEAVE_NOAPI(SUCCEED) +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Decrement global count of free memory on "regular" lists */ + H5TS_ATOMIC_FETCH_SUB_SIZE_T(&H5FL_reg_gc_head.mem_freed, (onlist * head->size)); + +#ifdef H5_HAVE_CONCURRENCY +done: +#endif /* H5_HAVE_CONCURRENCY */ + FUNC_LEAVE_NOAPI(ret_value) } /* end H5FL__reg_gc_list() */ /*------------------------------------------------------------------------- @@ -465,19 +703,30 @@ H5FL__reg_gc(void) FUNC_ENTER_PACKAGE - /* Walk through all the free lists, free()'ing the nodes */ - gc_node = H5FL_reg_gc_head.first; - while (gc_node != NULL) { - /* Release the free nodes on the list */ - if (H5FL__reg_gc_list(gc_node->list) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, FAIL, "garbage collection of list failed"); +#ifdef H5_HAVE_CONCURRENCY + if (H5FL_reg_gc_head.init) { + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_reg_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ - /* Go on to the next free list to garbage collect */ - gc_node = gc_node->next; - } /* end while */ + /* Walk through all the free lists, free()'ing the nodes */ + gc_node = H5FL_reg_gc_head.first; + while (gc_node != NULL) { + /* Release the free nodes on the list */ + if (H5FL__reg_gc_list(gc_node->list) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, FAIL, "garbage collection of list failed"); + + /* Go on to the next free list to garbage collect */ + gc_node = gc_node->next; + } /* end while */ - /* Double check that all the memory on the free lists is recycled */ - assert(H5FL_reg_gc_head.mem_freed == 0); +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_reg_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); + } +#endif /* H5_HAVE_CONCURRENCY */ done: FUNC_LEAVE_NOAPI(ret_value) @@ -510,44 +759,66 @@ H5FL__reg_gc(void) static int H5FL__reg_term(void) { - H5FL_reg_gc_node_t *left; /* pointer to garbage collection lists with work left */ + H5FL_reg_gc_node_t *left = NULL; /* pointer to garbage collection lists with work left */ FUNC_ENTER_PACKAGE_NOERR - /* Free the nodes on the garbage collection list, keeping nodes with allocations outstanding */ - left = NULL; - while (H5FL_reg_gc_head.first != NULL) { - H5FL_reg_gc_node_t *tmp; /* Temporary pointer to a garbage collection node */ +#ifdef H5_HAVE_CONCURRENCY + /* Have the regular free lists been initialized? 
*/ + if (H5FL_reg_gc_head.init) { +#endif /* H5_HAVE_CONCURRENCY */ - /* Get a copy of the next node */ - tmp = H5FL_reg_gc_head.first->next; + /* Free the nodes on the garbage collection list, keeping nodes with allocations outstanding */ + while (H5FL_reg_gc_head.first != NULL) { + H5FL_reg_gc_node_t *tmp; /* Temporary pointer to a garbage collection node */ + + /* Get a copy of the next node */ + tmp = H5FL_reg_gc_head.first->next; #ifdef H5FL_DEBUG - printf("%s: head->name = %s, head->allocated = %d\n", __func__, H5FL_reg_gc_head.first->list->name, - (int)H5FL_reg_gc_head.first->list->allocated); + printf("%s: head->name = %s, head->allocated = %d\n", __func__, + H5FL_reg_gc_head.first->list->name, (int)H5FL_reg_gc_head.first->list->allocated); #endif /* H5FL_DEBUG */ - /* Check if the list has allocations outstanding */ - if (H5FL_reg_gc_head.first->list->allocated > 0) { - /* Add free list to the list of nodes with allocations open still */ - H5FL_reg_gc_head.first->next = left; - left = H5FL_reg_gc_head.first; - } /* end if */ - /* No allocations left open for list, get rid of it */ - else { - /* Reset the "initialized" flag, in case we restart this list somehow (I don't know how..) */ - H5FL_reg_gc_head.first->list->init = false; + /* Check if the list has allocations outstanding */ + if (H5FL_reg_gc_head.first->list->allocated > 0) { + /* Add free list to the list of nodes with allocations open still */ + H5FL_reg_gc_head.first->next = left; + left = H5FL_reg_gc_head.first; + } /* end if */ + /* No allocations left open for list, get rid of it */ + else { +#ifdef H5_HAVE_CONCURRENCY + /* Destroy the mutex protecting this list */ + H5TS_dlftt_mutex_destroy(&H5FL_reg_gc_head.first->list->mutex); +#endif /* H5_HAVE_CONCURRENCY */ - /* Free the node from the garbage collection list */ - H5MM_xfree(H5FL_reg_gc_head.first); - } /* end else */ + /* Reset the "initialized" flag, in case we restart this list */ + H5_GLOBAL_SET_INIT(H5FL_reg_gc_head.first->list, false); - H5FL_reg_gc_head.first = tmp; - } /* end while */ + /* Free the node from the garbage collection list */ + H5MM_xfree(H5FL_reg_gc_head.first); + } /* end else */ + + H5FL_reg_gc_head.first = tmp; + } /* end while */ + + /* Point to the list of nodes left with allocations open, if any */ + H5FL_reg_gc_head.first = left; - /* Point to the list of nodes left with allocations open, if any */ - H5FL_reg_gc_head.first = left; +#ifdef H5_HAVE_CONCURRENCY + /* Check for all lists being shut down */ + if (NULL == left) { + /* Destroy concurrency objects */ + H5TS_dlftt_mutex_destroy(&H5FL_reg_gc_head.mutex); + H5TS_ATOMIC_DESTROY_SIZE_T(&H5FL_reg_gc_head.mem_freed); - FUNC_LEAVE_NOAPI(H5FL_reg_gc_head.first != NULL ? 1 : 0) + /* Reset init flag */ + H5FL_reg_gc_head.init = false; + } + } +#endif /* H5_HAVE_CONCURRENCY */ + + FUNC_LEAVE_NOAPI(left != NULL ? 1 : 0) } /* end H5FL__reg_term() */ /*------------------------------------------------------------------------- @@ -611,7 +882,7 @@ H5FL__blk_find_list(H5FL_blk_node_t **head, size_t size) * Function: H5FL__blk_create_list * * Purpose: Creates a new free list for blocks of the given size at the - * head of the priority queue. + * head of the priority queue. * * Return: Success: valid pointer to the free list node * @@ -650,7 +921,7 @@ H5FL__blk_create_list(H5FL_blk_node_t **head, size_t size) * Function: H5FL__blk_init * * Purpose: Initialize a priority queue of a certain type. Right now, this just - * adds the PQ to the list of things to garbage collect. 
+ * adds the PQ to the list of things to garbage collect. * * Return: Success: Non-negative * Failure: Negative @@ -672,12 +943,27 @@ H5FL__blk_init(H5FL_blk_head_t *head) /* Initialize the new garbage collection node */ new_node->pq = head; +#ifdef H5_HAVE_CONCURRENCY + /* Initialize the mutex protecting this specific list */ + if (H5TS_dlftt_mutex_init(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_blk_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* Link in to the garbage collection list */ new_node->next = H5FL_blk_gc_head.first; H5FL_blk_gc_head.first = new_node; - /* Indicate that the PQ is initialized */ - head->init = true; +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_blk_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ done: FUNC_LEAVE_NOAPI(ret_value) @@ -687,7 +973,7 @@ H5FL__blk_init(H5FL_blk_head_t *head) * Function: H5FL_blk_free_block_avail * * Purpose: Checks if a free block of the appropriate size is available - * for a given list. + * for a given list. * * Return: Success: non-negative * Failure: negative @@ -700,18 +986,35 @@ H5FL_blk_free_block_avail(H5FL_blk_head_t *head, size_t size) H5FL_blk_node_t *free_list; /* The free list of nodes of correct size */ htri_t ret_value = FAIL; /* Return value */ +#ifdef H5_HAVE_CONCURRENCY + FUNC_ENTER_NOAPI(FAIL) +#else /* H5_HAVE_CONCURRENCY */ FUNC_ENTER_NOAPI_NOERR +#endif /* H5_HAVE_CONCURRENCY */ /* Double check parameters */ assert(head); +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* check if there is a free list for blocks of this size */ /* and if there are any blocks available on the list */ - if ((free_list = H5FL__blk_find_list(&(head->head), size)) != NULL && free_list->list != NULL) + if ((free_list = H5FL__blk_find_list(&(head->pq), size)) != NULL && free_list->list != NULL) ret_value = true; else ret_value = false; +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list's mutex"); + +done: +#endif /* H5_HAVE_CONCURRENCY */ FUNC_LEAVE_NOAPI(ret_value) } /* end H5FL_blk_free_block_avail() */ @@ -719,8 +1022,8 @@ H5FL_blk_free_block_avail(H5FL_blk_head_t *head, size_t size) * Function: H5FL_blk_malloc * * Purpose: Allocates memory for a block. This routine is used - * instead of malloc because the block can be kept on a free list so - * they don't thrash malloc/free as much. + * instead of malloc because the block can be kept on a free list so + * they don't thrash malloc/free as much. * * Return: Success: valid pointer to the block * @@ -731,9 +1034,12 @@ H5FL_blk_free_block_avail(H5FL_blk_head_t *head, size_t size) void * H5FL_blk_malloc(H5FL_blk_head_t *head, size_t size) { - H5FL_blk_node_t *free_list; /* The free list of nodes of correct size */ - H5FL_blk_list_t *temp; /* Temp. 
ptr to the new native list allocated */ - void *ret_value = NULL; /* Pointer to the block to return to the user */ + H5FL_blk_node_t *free_list; /* The free list of nodes of correct size */ + H5FL_blk_list_t *temp; /* Temp. ptr to the new native list allocated */ +#ifdef H5_HAVE_CONCURRENCY + bool have_mutex = false; /* Whether we're holding the list's mutex */ +#endif /* H5_HAVE_CONCURRENCY */ + void *ret_value = NULL; /* Pointer to the block to return to the user */ FUNC_ENTER_NOAPI(NULL) @@ -742,13 +1048,18 @@ H5FL_blk_malloc(H5FL_blk_head_t *head, size_t size) assert(size); /* Make certain the list is initialized first */ - if (!head->init) - if (H5FL__blk_init(head) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, NULL, "can't initialize 'block' list"); + H5_GLOBAL_INIT(head, H5FL__blk_init, H5E_RESOURCE, H5E_CANTINIT, NULL, "can't initialize 'block' list"); + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock list's mutex"); + have_mutex = true; +#endif /* H5_HAVE_CONCURRENCY */ /* check if there is a free list for blocks of this size */ /* and if there are any blocks available on the list */ - if (NULL != (free_list = H5FL__blk_find_list(&(head->head), size)) && NULL != free_list->list) { + if (NULL != (free_list = H5FL__blk_find_list(&(head->pq), size)) && NULL != free_list->list) { /* Remove the first node from the free list */ temp = free_list->list; free_list->list = free_list->list->next; @@ -758,27 +1069,55 @@ H5FL_blk_malloc(H5FL_blk_head_t *head, size_t size) head->onlist--; head->list_mem -= size; +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); + have_mutex = false; +#endif /* H5_HAVE_CONCURRENCY */ + /* Decrement the amount of global "block" free list memory in use */ - H5FL_blk_gc_head.mem_freed -= size; + H5TS_ATOMIC_FETCH_SUB_SIZE_T(&H5FL_blk_gc_head.mem_freed, size); } /* end if */ /* No free list available, or there are no nodes on the list, allocate a new node to give to the user */ else { - /* Check if there was no free list for native blocks of this size */ - if (NULL == free_list) - /* Create a new list node and insert it to the queue */ - free_list = H5FL__blk_create_list(&(head->head), size); - assert(free_list); +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); + have_mutex = false; +#endif /* H5_HAVE_CONCURRENCY */ /* Allocate new node, with room for the page info header and the actual page data */ if (NULL == (temp = (H5FL_blk_list_t *)H5FL__malloc(sizeof(H5FL_blk_list_t) + size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for chunk"); +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock list's mutex"); + have_mutex = true; +#endif /* H5_HAVE_CONCURRENCY */ + + /* Check (again) if there is (now) a free list for native blocks of this size */ + if (NULL == (free_list = H5FL__blk_find_list(&(head->pq), size))) + /* Create a new list node and insert it to the queue */ + if (NULL == (free_list = H5FL__blk_create_list(&(head->pq), size))) + HGOTO_ERROR(H5E_RESOURCE, 
H5E_NOSPACE, NULL, "memory allocation failed for list node"); + /* Increment the number of blocks of this size */ free_list->allocated++; /* Increment the total number of blocks allocated */ head->allocated++; - } /* end else */ + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); + have_mutex = false; +#endif /* H5_HAVE_CONCURRENCY */ + } /* end else */ /* Initialize the block allocated */ temp->size = size; @@ -787,6 +1126,16 @@ H5FL_blk_malloc(H5FL_blk_head_t *head, size_t size) ret_value = ((char *)temp) + sizeof(H5FL_blk_list_t); done: +#ifdef H5_HAVE_CONCURRENCY + if (NULL == ret_value) { + /* Release the mutex protecting this list, if we're holding it */ + /* (Only happens on error) */ + if (have_mutex) + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HDONE_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); + } +#endif /* H5_HAVE_CONCURRENCY */ + FUNC_LEAVE_NOAPI(ret_value) } /* end H5FL_blk_malloc() */ @@ -794,9 +1143,9 @@ H5FL_blk_malloc(H5FL_blk_head_t *head, size_t size) * Function: H5FL_blk_calloc * * Purpose: Allocates memory for a block and clear it to zeros. - * This routine is used - * instead of malloc because the block can be kept on a free list so - * they don't thrash malloc/free as much. + * This routine is used + * instead of malloc because the block can be kept on a free list so + * they don't thrash malloc/free as much. * * Return: Success: valid pointer to the block * @@ -830,8 +1179,8 @@ H5FL_blk_calloc(H5FL_blk_head_t *head, size_t size) * Function: H5FL_blk_free * * Purpose: Releases memory for a block. This routine is used - * instead of free because the blocks can be kept on a free list so - * they don't thrash malloc/free as much. + * instead of free because the blocks can be kept on a free list so + * they don't thrash malloc/free as much. * * Return: Success: NULL * @@ -845,6 +1194,7 @@ H5FL_blk_free(H5FL_blk_head_t *head, void *block) H5FL_blk_node_t *free_list; /* The free list of nodes of correct size */ H5FL_blk_list_t *temp; /* Temp. 
ptr to the new free list node allocated */ size_t free_size; /* Size of the block freed */ + size_t list_mem; /* Amount of memory in block on free list */ void *ret_value = NULL; /* Return value */ /* NOINIT OK here because this must be called after H5FL_blk_malloc/calloc @@ -861,14 +1211,23 @@ H5FL_blk_free(H5FL_blk_head_t *head, void *block) /* Save the block's size for later */ free_size = temp->size; + /* Make certain that the free list is initialized */ + assert(H5_GLOBAL_IS_INIT(head)); + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + #ifdef H5FL_DEBUG memset(temp, 255, free_size + sizeof(H5FL_blk_list_t)); #endif /* H5FL_DEBUG */ /* Check if there is a free list for native blocks of this size */ - if (NULL == (free_list = H5FL__blk_find_list(&(head->head), free_size))) + if (NULL == (free_list = H5FL__blk_find_list(&(head->pq), free_size))) /* No free list available, create a new list node and insert it to the queue */ - free_list = H5FL__blk_create_list(&(head->head), free_size); + free_list = H5FL__blk_create_list(&(head->pq), free_size); if (NULL == free_list) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "couldn't create new list node"); @@ -880,18 +1239,29 @@ H5FL_blk_free(H5FL_blk_head_t *head, void *block) free_list->onlist++; head->onlist++; head->list_mem += free_size; + list_mem = head->list_mem; + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Increment the amount of "block" freed memory globally */ - H5FL_blk_gc_head.mem_freed += free_size; + H5TS_ATOMIC_FETCH_ADD_SIZE_T(&H5FL_blk_gc_head.mem_freed, free_size); /* Check for exceeding free list memory use limits */ /* First check this particular list */ - if (head->list_mem > H5FL_blk_lst_mem_lim) + if (list_mem > H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_blk_lst_mem_lim)) + /* It's possible that 2+ threads could race and garbage collect, but */ + /* that's OK, on the rare occasions it happens */ if (H5FL__blk_gc_list(head) < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, NULL, "garbage collection failed during free"); /* Then check the global amount memory on block free lists */ - if (H5FL_blk_gc_head.mem_freed > H5FL_blk_glb_mem_lim) + if (H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_blk_gc_head.mem_freed) > H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_blk_glb_mem_lim)) + /* It's possible that 2+ threads could race and garbage collect, but */ + /* that's OK, on the rare occasions it happens */ if (H5FL__blk_gc() < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, NULL, "garbage collection failed during free"); @@ -903,7 +1273,7 @@ H5FL_blk_free(H5FL_blk_head_t *head, void *block) * Function: H5FL_blk_realloc * * Purpose: Resizes a block. This does things the straightforward, simple way, - * not actually using realloc. + * not actually using realloc. * * Return: Success: NULL * @@ -963,15 +1333,28 @@ H5FL_blk_realloc(H5FL_blk_head_t *head, void *block, size_t new_size) static herr_t H5FL__blk_gc_list(H5FL_blk_head_t *head) { - H5FL_blk_node_t *blk_head; /* Temp. ptr to the free list page node */ + size_t total_freed = 0; /* Total amount of memory freed on a list */ + H5FL_blk_node_t *blk_head; /* Temp. 
ptr to the free list page node */ + herr_t ret_value = SUCCEED; /* Return value*/ +#ifdef H5_HAVE_CONCURRENCY + FUNC_ENTER_PACKAGE +#else /* H5_HAVE_CONCURRENCY */ FUNC_ENTER_PACKAGE_NOERR +#endif /* H5_HAVE_CONCURRENCY */ + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Loop through all the nodes in the block free list queue */ - blk_head = head->head; + blk_head = head->pq; while (blk_head != NULL) { - H5FL_blk_node_t *blk_next; /* Temp. ptr to the next free list node */ - H5FL_blk_list_t *list; /* The free list of native nodes of a particular size */ + H5FL_blk_node_t *blk_next; /* Temp. ptr to the next free list node */ + H5FL_blk_list_t *list; /* The free list of native nodes of a particular size */ + size_t list_freed = 0; /* Amount of memory freed on a list */ /* Sanity check */ assert((blk_head->onlist && blk_head->list) || (0 == blk_head->onlist && NULL == blk_head->list)); @@ -995,11 +1378,12 @@ H5FL__blk_gc_list(H5FL_blk_head_t *head) blk_head->allocated -= blk_head->onlist; head->allocated -= blk_head->onlist; - /* Decrement count of free memory on this "block" list */ - head->list_mem -= (blk_head->onlist * blk_head->size); + /* Track amount of memory freed */ + list_freed = blk_head->onlist * blk_head->size; + total_freed += list_freed; /* Decrement global count of free memory on "block" lists */ - H5FL_blk_gc_head.mem_freed -= (blk_head->onlist * blk_head->size); + head->list_mem -= list_freed; /* Indicate no free nodes on the free list */ blk_head->list = NULL; @@ -1011,8 +1395,8 @@ H5FL__blk_gc_list(H5FL_blk_head_t *head) /* Check for list completely unused now */ if (0 == blk_head->allocated) { /* Patch this node out of the PQ */ - if (head->head == blk_head) - head->head = blk_head->next; + if (head->pq == blk_head) + head->pq = blk_head->next; if (blk_head->prev) blk_head->prev->next = blk_head->next; if (blk_head->next) @@ -1032,7 +1416,19 @@ H5FL__blk_gc_list(H5FL_blk_head_t *head) /* Double check that all the memory on this list is recycled */ assert(0 == head->list_mem); - FUNC_LEAVE_NOAPI(SUCCEED) +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Decrement global count of free memory on "block" lists */ + H5TS_ATOMIC_FETCH_SUB_SIZE_T(&H5FL_blk_gc_head.mem_freed, total_freed); + +#ifdef H5_HAVE_CONCURRENCY +done: +#endif /* H5_HAVE_CONCURRENCY */ + FUNC_LEAVE_NOAPI(ret_value) } /* end H5FL__blk_gc_list() */ /*------------------------------------------------------------------------- @@ -1053,19 +1449,30 @@ H5FL__blk_gc(void) FUNC_ENTER_PACKAGE - /* Walk through all the free lists, free()'ing the nodes */ - gc_node = H5FL_blk_gc_head.first; - while (gc_node != NULL) { - /* For each free list being garbage collected, walk through the nodes and free them */ - if (H5FL__blk_gc_list(gc_node->pq) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, FAIL, "garbage collection of list failed"); +#ifdef H5_HAVE_CONCURRENCY + if (H5FL_blk_gc_head.init) { + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_blk_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ - /* Go on to the next 
free list to garbage collect */ - gc_node = gc_node->next; - } /* end while */ + /* Walk through all the free lists, free()'ing the nodes */ + gc_node = H5FL_blk_gc_head.first; + while (gc_node != NULL) { + /* For each free list being garbage collected, walk through the nodes and free them */ + if (H5FL__blk_gc_list(gc_node->pq) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, FAIL, "garbage collection of list failed"); - /* Double check that all the memory on the free lists are recycled */ - assert(H5FL_blk_gc_head.mem_freed == 0); + /* Go on to the next free list to garbage collect */ + gc_node = gc_node->next; + } /* end while */ + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_blk_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); + } +#endif /* H5_HAVE_CONCURRENCY */ done: FUNC_LEAVE_NOAPI(ret_value) @@ -1093,51 +1500,73 @@ H5FL__blk_gc(void) static int H5FL__blk_term(void) { - H5FL_blk_gc_node_t *left; /* pointer to garbage collection lists with work left */ + H5FL_blk_gc_node_t *left = NULL; /* pointer to garbage collection lists with work left */ FUNC_ENTER_PACKAGE_NOERR - /* Free the nodes on the garbage collection list, keeping nodes with allocations outstanding */ - left = NULL; - while (H5FL_blk_gc_head.first != NULL) { - H5FL_blk_gc_node_t *tmp; /* Temporary pointer to a garbage collection node */ +#ifdef H5_HAVE_CONCURRENCY + /* Have the block free lists been initialized? */ + if (H5FL_blk_gc_head.init) { +#endif /* H5_HAVE_CONCURRENCY */ + + /* Free the nodes on the garbage collection list, keeping nodes with allocations outstanding */ + while (H5FL_blk_gc_head.first != NULL) { + H5FL_blk_gc_node_t *tmp; /* Temporary pointer to a garbage collection node */ - tmp = H5FL_blk_gc_head.first->next; + tmp = H5FL_blk_gc_head.first->next; #ifdef H5FL_DEBUG - printf("%s: head->name = %s, head->allocated = %d\n", __func__, H5FL_blk_gc_head.first->pq->name, - (int)H5FL_blk_gc_head.first->pq->allocated); + printf("%s: head->name = %s, head->allocated = %d\n", __func__, H5FL_blk_gc_head.first->pq->name, + (int)H5FL_blk_gc_head.first->pq->allocated); #endif /* H5FL_DEBUG */ - /* Check if the list has allocations outstanding */ - if (H5FL_blk_gc_head.first->pq->allocated > 0) { - /* Add free list to the list of nodes with allocations open still */ - H5FL_blk_gc_head.first->next = left; - left = H5FL_blk_gc_head.first; - } /* end if */ - /* No allocations left open for list, get rid of it */ - else { - /* Reset the "initialized" flag, in case we restart this list somehow (I don't know how..) 
*/ - H5FL_blk_gc_head.first->pq->init = false; + /* Check if the list has allocations outstanding */ + if (H5FL_blk_gc_head.first->pq->allocated > 0) { + /* Add free list to the list of nodes with allocations open still */ + H5FL_blk_gc_head.first->next = left; + left = H5FL_blk_gc_head.first; + } /* end if */ + /* No allocations left open for list, get rid of it */ + else { +#ifdef H5_HAVE_CONCURRENCY + /* Destroy the mutex protecting this list */ + H5TS_dlftt_mutex_destroy(&H5FL_blk_gc_head.first->pq->mutex); +#endif /* H5_HAVE_CONCURRENCY */ - /* Free the node from the garbage collection list */ - H5MM_free(H5FL_blk_gc_head.first); - } /* end else */ + /* Reset the "initialized" flag, in case we restart this list */ + H5_GLOBAL_SET_INIT(H5FL_blk_gc_head.first->pq, false); - H5FL_blk_gc_head.first = tmp; - } /* end while */ + /* Free the node from the garbage collection list */ + H5MM_free(H5FL_blk_gc_head.first); + } /* end else */ + + H5FL_blk_gc_head.first = tmp; + } /* end while */ - /* Point to the list of nodes left with allocations open, if any */ - H5FL_blk_gc_head.first = left; + /* Point to the list of nodes left with allocations open, if any */ + H5FL_blk_gc_head.first = left; - FUNC_LEAVE_NOAPI(H5FL_blk_gc_head.first != NULL ? 1 : 0) +#ifdef H5_HAVE_CONCURRENCY + /* Check for all lists being shut down */ + if (NULL == left) { + /* Destroy concurrency objects */ + H5TS_dlftt_mutex_destroy(&H5FL_blk_gc_head.mutex); + H5TS_ATOMIC_DESTROY_SIZE_T(&H5FL_blk_gc_head.mem_freed); + + /* Reset init flag */ + H5FL_blk_gc_head.init = false; + } + } +#endif /* H5_HAVE_CONCURRENCY */ + + FUNC_LEAVE_NOAPI(left != NULL ? 1 : 0) } /* end H5FL__blk_term() */ /*------------------------------------------------------------------------- * Function: H5FL__arr_init * * Purpose: Initialize a free list for a arrays of certain type. Right now, - * this just adds the free list to the list of things to garbage collect. + * this just adds the free list to the list of things to garbage collect. 
* * Return: Success: Non-negative * Failure: Negative @@ -1160,9 +1589,11 @@ H5FL__arr_init(H5FL_arr_head_t *head) /* Initialize the new garbage collection node */ new_node->list = head; - /* Link in to the garbage collection list */ - new_node->next = H5FL_arr_gc_head.first; - H5FL_arr_gc_head.first = new_node; +#ifdef H5_HAVE_CONCURRENCY + /* Initialize the mutex protecting this specific list */ + if (H5TS_dlftt_mutex_init(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Allocate room for the free lists */ if (NULL == @@ -1173,8 +1604,21 @@ H5FL__arr_init(H5FL_arr_head_t *head) for (u = 0; u < (size_t)head->maxelem; u++) head->list_arr[u].size = head->base_size + (head->elem_size * u); - /* Indicate that the free list is initialized */ - head->init = true; +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_arr_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Link in to the garbage collection list */ + new_node->next = H5FL_arr_gc_head.first; + H5FL_arr_gc_head.first = new_node; + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_arr_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ done: FUNC_LEAVE_NOAPI(ret_value) @@ -1196,6 +1640,7 @@ H5FL_arr_free(H5FL_arr_head_t *head, void *obj) H5FL_arr_list_t *temp; /* Temp. ptr to the new free list node allocated */ size_t mem_size; /* Size of memory being freed */ size_t free_nelem; /* Number of elements in node being free'd */ + size_t list_mem; /* Amount of memory in block on free list */ void *ret_value = NULL; /* Return value */ /* NOINIT OK here because this must be called after H5FL_arr_malloc/calloc @@ -1210,7 +1655,13 @@ H5FL_arr_free(H5FL_arr_head_t *head, void *obj) assert(head); /* Make certain that the free list is initialized */ - assert(head->init); + assert(H5_GLOBAL_IS_INIT(head)); + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Get the pointer to the info header in front of the block to free */ temp = (H5FL_arr_list_t *)((void *)((unsigned char *)obj - sizeof(H5FL_arr_list_t))); @@ -1233,18 +1684,29 @@ H5FL_arr_free(H5FL_arr_head_t *head, void *obj) /* Increment the number of blocks & memory used on free list */ head->list_arr[free_nelem].onlist++; head->list_mem += mem_size; + list_mem = head->list_mem; + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Increment the amount of "array" freed memory globally */ - H5FL_arr_gc_head.mem_freed += mem_size; + H5TS_ATOMIC_FETCH_ADD_SIZE_T(&H5FL_arr_gc_head.mem_freed, mem_size); /* Check for exceeding free list memory use limits */ /* First check this particular list */ - if (head->list_mem > H5FL_arr_lst_mem_lim) + if (list_mem > H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_arr_lst_mem_lim)) + /* It's possible that 2+ threads could race and garbage collect, but */ + /* that's OK, on the rare occasions it happens */ 
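/*
 * Illustrative note (not part of the patch): both limit checks here operate on
 * snapshots rather than on freshly locked state.  The per-list check above
 * compares 'list_mem', which was copied while head->mutex was still held,
 * against the atomically loaded per-list limit; the global check just below
 * compares the atomically loaded H5FL_arr_gc_head.mem_freed against the global
 * limit.  In sketch form:
 *
 *     list_mem = head->list_mem;                       (taken under head->mutex)
 *     ... release head->mutex ...
 *     if (list_mem > H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_arr_lst_mem_lim))
 *         H5FL__arr_gc_list(head);                     (may race with other threads)
 *
 * Two threads can both observe an exceeded limit and both trigger garbage
 * collection; that only duplicates a little work and stays correct, because
 * H5FL__arr_gc_list() re-acquires head->mutex before it touches the list.
 */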
if (H5FL__arr_gc_list(head) < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, NULL, "garbage collection failed during free"); /* Then check the global amount memory on array free lists */ - if (H5FL_arr_gc_head.mem_freed > H5FL_arr_glb_mem_lim) + if (H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_arr_gc_head.mem_freed) > H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_arr_glb_mem_lim)) + /* It's possible that 2+ threads could race and garbage collect, but */ + /* that's OK, on the rare occasions it happens */ if (H5FL__arr_gc() < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, NULL, "garbage collection failed during free"); @@ -1276,9 +1738,13 @@ H5FL_arr_malloc(H5FL_arr_head_t *head, size_t elem) assert(elem); /* Make certain the list is initialized first */ - if (!head->init) - if (H5FL__arr_init(head) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, NULL, "can't initialize 'array' blocks"); + H5_GLOBAL_INIT(head, H5FL__arr_init, H5E_RESOURCE, H5E_CANTINIT, NULL, "can't initialize 'array' blocks"); + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Sanity check that the number of elements is supported */ assert(elem <= (unsigned)head->maxelem); @@ -1298,21 +1764,46 @@ H5FL_arr_malloc(H5FL_arr_head_t *head, size_t elem) head->list_arr[elem].onlist--; head->list_mem -= mem_size; +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* Decrement the amount of global "array" free list memory in use */ - H5FL_arr_gc_head.mem_freed -= mem_size; + H5TS_ATOMIC_FETCH_SUB_SIZE_T(&H5FL_arr_gc_head.mem_freed, mem_size); } /* end if */ /* Otherwise allocate a node */ else { - if (NULL == (new_obj = (H5FL_arr_list_t *)H5FL__malloc(sizeof(H5FL_arr_list_t) + mem_size))) +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Allocate new memory */ + if (NULL == (new_obj = H5FL__malloc(sizeof(H5FL_arr_list_t) + mem_size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* Increment the number of blocks of this size */ head->list_arr[elem].allocated++; /* Increment the number of blocks allocated in list, of all sizes */ head->allocated++; - } /* end else */ + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + } /* end else */ /* Initialize the new object */ new_obj->nelem = elem; @@ -1423,14 +1914,27 @@ H5FL_arr_realloc(H5FL_arr_head_t *head, void *obj, size_t new_elem) static herr_t H5FL__arr_gc_list(H5FL_arr_head_t *head) { - unsigned u; /* Counter for array of free lists */ + size_t total_freed = 0; /* Total amount of memory freed on a list */ + unsigned u; /* Counter for array of free lists */ + herr_t ret_value = SUCCEED; /* Return value*/ +#ifdef 
H5_HAVE_CONCURRENCY + FUNC_ENTER_PACKAGE +#else /* H5_HAVE_CONCURRENCY */ FUNC_ENTER_PACKAGE_NOERR +#endif /* H5_HAVE_CONCURRENCY */ + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting this list */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Walk through the array of free lists */ for (u = 0; u < (unsigned)head->maxelem; u++) { if (head->list_arr[u].onlist > 0) { - H5FL_arr_list_t *arr_free_list; /* Pointer to nodes in free list being garbage collected */ + H5FL_arr_list_t *arr_free_list; /* Pointer to nodes in free list being garbage collected */ + size_t list_freed = 0; /* Amount of memory freed on a list */ /* For each free list being garbage collected, walk through the nodes and free them */ arr_free_list = head->list_arr[u].list; @@ -1451,11 +1955,12 @@ H5FL__arr_gc_list(H5FL_arr_head_t *head) head->list_arr[u].allocated -= head->list_arr[u].onlist; head->allocated -= head->list_arr[u].onlist; - /* Decrement count of free memory on this "array" list */ - head->list_mem -= (head->list_arr[u].onlist * head->list_arr[u].size); + /* Track amount of memory freed */ + list_freed = head->list_arr[u].onlist * head->list_arr[u].size; + total_freed += list_freed; - /* Decrement global count of free memory on "array" lists */ - H5FL_arr_gc_head.mem_freed -= (head->list_arr[u].onlist * head->list_arr[u].size); + /* Decrement count of free memory on this "array" list */ + head->list_mem -= list_freed; /* Indicate no free nodes on the free list */ head->list_arr[u].list = NULL; @@ -1466,7 +1971,19 @@ H5FL__arr_gc_list(H5FL_arr_head_t *head) /* Double check that all the memory on this list is recycled */ assert(head->list_mem == 0); - FUNC_LEAVE_NOAPI(SUCCEED) +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting this list */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Decrement global count of free memory on "array" lists */ + H5TS_ATOMIC_FETCH_SUB_SIZE_T(&H5FL_arr_gc_head.mem_freed, total_freed); + +#ifdef H5_HAVE_CONCURRENCY +done: +#endif /* H5_HAVE_CONCURRENCY */ + FUNC_LEAVE_NOAPI(ret_value) } /* end H5FL__arr_gc_list() */ /*------------------------------------------------------------------------- @@ -1487,19 +2004,30 @@ H5FL__arr_gc(void) FUNC_ENTER_PACKAGE - /* Walk through all the free lists, free()'ing the nodes */ - gc_arr_node = H5FL_arr_gc_head.first; - while (gc_arr_node != NULL) { - /* Release the free nodes on the list */ - if (H5FL__arr_gc_list(gc_arr_node->list) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, FAIL, "garbage collection of list failed"); +#ifdef H5_HAVE_CONCURRENCY + if (H5FL_arr_gc_head.init) { + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_arr_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ - /* Go on to the next free list to garbage collect */ - gc_arr_node = gc_arr_node->next; - } /* end while */ + /* Walk through all the free lists, free()'ing the nodes */ + gc_arr_node = H5FL_arr_gc_head.first; + while (gc_arr_node != NULL) { + /* Release the free nodes on the list */ + if (H5FL__arr_gc_list(gc_arr_node->list) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, FAIL, "garbage collection of list failed"); + + /* Go on to the next free list to garbage collect */ + gc_arr_node = 
gc_arr_node->next; + } /* end while */ - /* Double check that all the memory on the free lists are recycled */ - assert(H5FL_arr_gc_head.mem_freed == 0); +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_arr_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); + } +#endif /* H5_HAVE_CONCURRENCY */ done: FUNC_LEAVE_NOAPI(ret_value) @@ -1527,46 +2055,68 @@ H5FL__arr_gc(void) static int H5FL__arr_term(void) { - H5FL_gc_arr_node_t *left; /* pointer to garbage collection lists with work left */ + H5FL_gc_arr_node_t *left = NULL; /* pointer to garbage collection lists with work left */ FUNC_ENTER_PACKAGE_NOERR - /* Free the nodes on the garbage collection list, keeping nodes with allocations outstanding */ - left = NULL; - while (H5FL_arr_gc_head.first != NULL) { - H5FL_gc_arr_node_t *tmp; /* Temporary pointer to a garbage collection node */ +#ifdef H5_HAVE_CONCURRENCY + /* Have the array free lists been initialized? */ + if (H5FL_arr_gc_head.init) { +#endif /* H5_HAVE_CONCURRENCY */ - tmp = H5FL_arr_gc_head.first->next; + /* Free the nodes on the garbage collection list, keeping nodes with allocations outstanding */ + while (H5FL_arr_gc_head.first != NULL) { + H5FL_gc_arr_node_t *tmp; /* Temporary pointer to a garbage collection node */ - /* Check if the list has allocations outstanding */ + tmp = H5FL_arr_gc_head.first->next; + + /* Check if the list has allocations outstanding */ #ifdef H5FL_DEBUG - printf("%s: head->name = %s, head->allocated = %d\n", __func__, H5FL_arr_gc_head.first->list->name, - (int)H5FL_arr_gc_head.first->list->allocated); + printf("%s: head->name = %s, head->allocated = %d\n", __func__, + H5FL_arr_gc_head.first->list->name, (int)H5FL_arr_gc_head.first->list->allocated); #endif /* H5FL_DEBUG */ - if (H5FL_arr_gc_head.first->list->allocated > 0) { - /* Add free list to the list of nodes with allocations open still */ - H5FL_arr_gc_head.first->next = left; - left = H5FL_arr_gc_head.first; - } /* end if */ - /* No allocations left open for list, get rid of it */ - else { - /* Free the array of free lists */ - H5MM_xfree(H5FL_arr_gc_head.first->list->list_arr); + if (H5FL_arr_gc_head.first->list->allocated > 0) { + /* Add free list to the list of nodes with allocations open still */ + H5FL_arr_gc_head.first->next = left; + left = H5FL_arr_gc_head.first; + } /* end if */ + /* No allocations left open for list, get rid of it */ + else { + /* Free the array of free lists */ + H5MM_xfree(H5FL_arr_gc_head.first->list->list_arr); - /* Reset the "initialized" flag, in case we restart this list somehow (I don't know how..) 
*/ - H5FL_arr_gc_head.first->list->init = false; +#ifdef H5_HAVE_CONCURRENCY + /* Destroy the mutex protecting this list */ + H5TS_dlftt_mutex_destroy(&H5FL_arr_gc_head.first->list->mutex); +#endif /* H5_HAVE_CONCURRENCY */ - /* Free the node from the garbage collection list */ - H5MM_free(H5FL_arr_gc_head.first); - } /* end else */ + /* Reset the "initialized" flag, in case we restart this list */ + H5_GLOBAL_SET_INIT(H5FL_arr_gc_head.first->list, false); - H5FL_arr_gc_head.first = tmp; - } /* end while */ + /* Free the node from the garbage collection list */ + H5MM_free(H5FL_arr_gc_head.first); + } /* end else */ + + H5FL_arr_gc_head.first = tmp; + } /* end while */ + + /* Point to the list of nodes left with allocations open, if any */ + H5FL_arr_gc_head.first = left; - /* Point to the list of nodes left with allocations open, if any */ - H5FL_arr_gc_head.first = left; +#ifdef H5_HAVE_CONCURRENCY + /* Check for all lists being shut down */ + if (NULL == left) { + /* Destroy concurrency objects */ + H5TS_dlftt_mutex_destroy(&H5FL_arr_gc_head.mutex); + H5TS_ATOMIC_DESTROY_SIZE_T(&H5FL_arr_gc_head.mem_freed); + + /* Reset init flag */ + H5FL_arr_gc_head.init = false; + } + } +#endif /* H5_HAVE_CONCURRENCY */ - FUNC_LEAVE_NOAPI(H5FL_arr_gc_head.first != NULL ? 1 : 0) + FUNC_LEAVE_NOAPI(left != NULL ? 1 : 0) } /* end H5FL__arr_term() */ /*------------------------------------------------------------------------- @@ -1590,9 +2140,6 @@ H5FL_seq_free(H5FL_seq_head_t *head, void *obj) assert(head); assert(obj); - /* Make certain that the free list is initialized */ - assert(head->queue.init); - /* Use block routine */ H5FL_blk_free(&(head->queue), obj); @@ -1716,19 +2263,32 @@ H5FL_fac_init(size_t size) /* Initialize the new garbage collection node */ new_node->list = factory; + /* Make certain that the space allocated is large enough to store a free list pointer (eventually) */ + if (factory->size < sizeof(H5FL_fac_node_t)) + factory->size = sizeof(H5FL_fac_node_t); + +#ifdef H5_HAVE_CONCURRENCY + /* Initialize the mutex protecting this factory */ + if (H5TS_dlftt_mutex_init(&factory->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, NULL, "can't initialize factory's mutex"); + + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_fac_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* Link in to the garbage collection list */ new_node->next = H5FL_fac_gc_head.first; H5FL_fac_gc_head.first = new_node; if (new_node->next) new_node->next->list->prev_gc = new_node; - /* The new factory's prev_gc field will be set to NULL */ + /* The new factory's prev_gc field will be set to NULL */ - /* Make certain that the space allocated is large enough to store a free list pointer (eventually) */ - if (factory->size < sizeof(H5FL_fac_node_t)) - factory->size = sizeof(H5FL_fac_node_t); - - /* Indicate that the free list is initialized */ - factory->init = true; +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_fac_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Set return value */ ret_value = factory; @@ -1756,7 +2316,8 @@ H5FL_fac_init(size_t size) void * H5FL_fac_free(H5FL_fac_head_t *head, void *obj) { - void *ret_value = NULL; /* Return value */ + unsigned onlist; /* Number of blocks on free list */ + void 
*ret_value = NULL; /* Return value */ /* NOINIT OK here because this must be called after H5FL_fac_init -NAF */ FUNC_ENTER_NOAPI_NOINIT @@ -1765,13 +2326,16 @@ H5FL_fac_free(H5FL_fac_head_t *head, void *obj) assert(head); assert(obj); +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting the factory */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock factory's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + #ifdef H5FL_DEBUG memset(obj, 255, head->size); #endif /* H5FL_DEBUG */ - /* Make certain that the free list is initialized */ - assert(head->init); - /* Link into the free list */ ((H5FL_fac_node_t *)obj)->next = head->list; @@ -1779,19 +2343,29 @@ H5FL_fac_free(H5FL_fac_head_t *head, void *obj) head->list = (H5FL_fac_node_t *)obj; /* Increment the number of blocks on free list */ - head->onlist++; + onlist = ++head->onlist; + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the factory */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock factory's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Increment the amount of "factory" freed memory globally */ - H5FL_fac_gc_head.mem_freed += head->size; + H5TS_ATOMIC_FETCH_ADD_SIZE_T(&H5FL_fac_gc_head.mem_freed, head->size); /* Check for exceeding free list memory use limits */ /* First check this particular list */ - if (head->onlist * head->size > H5FL_fac_lst_mem_lim) + if (onlist * head->size > H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_fac_lst_mem_lim)) + /* It's possible that 2+ threads could race and garbage collect, but */ + /* that's OK, on the rare occasions it happens */ if (H5FL__fac_gc_list(head) < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, NULL, "garbage collection failed during free"); /* Then check the global amount memory on factory free lists */ - if (H5FL_fac_gc_head.mem_freed > H5FL_fac_glb_mem_lim) + if (H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_fac_gc_head.mem_freed) > H5TS_ATOMIC_LOAD_SIZE_T(&H5FL_fac_glb_mem_lim)) + /* It's possible that 2+ threads could race and garbage collect, but */ + /* that's OK, on the rare occasions it happens */ if (H5FL__fac_gc() < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, NULL, "garbage collection failed during free"); @@ -1819,7 +2393,12 @@ H5FL_fac_malloc(H5FL_fac_head_t *head) /* Double check parameters */ assert(head); - assert(head->init); + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting the factory */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock factory's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ /* Check for nodes available on the free list first */ if (head->list != NULL) { @@ -1832,17 +2411,41 @@ H5FL_fac_malloc(H5FL_fac_head_t *head) /* Decrement the number of blocks & memory on free list */ head->onlist--; +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the factory */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock factory's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* Decrement the amount of global "factory" free list memory in use */ - H5FL_fac_gc_head.mem_freed -= (head->size); + H5TS_ATOMIC_FETCH_SUB_SIZE_T(&H5FL_fac_gc_head.mem_freed, head->size); } /* end if */ /* Otherwise allocate a node */ else { +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the factory */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock factory's 
mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + if (NULL == (ret_value = H5FL__malloc(head->size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting the factory */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, NULL, "can't lock factory's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* Increment the number of blocks allocated in list */ head->allocated++; - } /* end else */ + +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the factory */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, NULL, "can't unlock factory's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + } /* end else */ done: FUNC_LEAVE_NOAPI(ret_value) @@ -1893,9 +2496,24 @@ H5FL_fac_calloc(H5FL_fac_head_t *head) static herr_t H5FL__fac_gc_list(H5FL_fac_head_t *head) { - H5FL_fac_node_t *free_list; /* Pointer to nodes in free list being garbage collected */ + H5FL_fac_node_t *free_list; /* Pointer to nodes in free list being garbage collected */ + unsigned onlist; /* Number of blocks on free list */ + herr_t ret_value = SUCCEED; /* Return value*/ +#ifdef H5_HAVE_CONCURRENCY + FUNC_ENTER_PACKAGE +#else /* H5_HAVE_CONCURRENCY */ FUNC_ENTER_PACKAGE_NOERR +#endif /* H5_HAVE_CONCURRENCY */ + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting the factory */ + if (H5TS_dlftt_mutex_acquire(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock factory's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Save for later */ + onlist = head->onlist; /* For each free list being garbage collected, walk through the nodes and free them */ free_list = head->list; @@ -1915,14 +2533,23 @@ H5FL__fac_gc_list(H5FL_fac_head_t *head) /* Decrement the count of nodes allocated and free the node */ head->allocated -= head->onlist; - /* Decrement global count of free memory on "factory" lists */ - H5FL_fac_gc_head.mem_freed -= (head->onlist * head->size); - /* Indicate no free nodes on the free list */ head->list = NULL; head->onlist = 0; - FUNC_LEAVE_NOAPI(SUCCEED) +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the factory */ + if (H5TS_dlftt_mutex_release(&head->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock factory's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Decrement global count of free memory on "factory" lists */ + H5TS_ATOMIC_FETCH_SUB_SIZE_T(&H5FL_fac_gc_head.mem_freed, (onlist * head->size)); + +#ifdef H5_HAVE_CONCURRENCY +done: +#endif /* H5_HAVE_CONCURRENCY */ + FUNC_LEAVE_NOAPI(ret_value) } /* end H5FL__fac_gc_list() */ /*------------------------------------------------------------------------- @@ -1943,19 +2570,30 @@ H5FL__fac_gc(void) FUNC_ENTER_PACKAGE - /* Walk through all the free lists, free()'ing the nodes */ - gc_node = H5FL_fac_gc_head.first; - while (gc_node != NULL) { - /* Release the free nodes on the list */ - if (H5FL__fac_gc_list(gc_node->list) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, FAIL, "garbage collection of list failed"); +#ifdef H5_HAVE_CONCURRENCY + if (H5FL_fac_gc_head.init) { + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_fac_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ - /* Go on to the next free list to garbage collect */ - gc_node = gc_node->next; - } /* end while */ + /* Walk through all the free lists, 
free()'ing the nodes */ + gc_node = H5FL_fac_gc_head.first; + while (gc_node != NULL) { + /* Release the free nodes on the list */ + if (H5FL__fac_gc_list(gc_node->list) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, FAIL, "garbage collection of list failed"); + + /* Go on to the next free list to garbage collect */ + gc_node = gc_node->next; + } /* end while */ - /* Double check that all the memory on the free lists is recycled */ - assert(H5FL_fac_gc_head.mem_freed == 0); +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_fac_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); + } +#endif /* H5_HAVE_CONCURRENCY */ done: FUNC_LEAVE_NOAPI(ret_value) @@ -1987,6 +2625,18 @@ H5FL_fac_term(H5FL_fac_head_t *factory) if (H5FL__fac_gc_list(factory) < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, FAIL, "garbage collection of factory failed"); +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_fac_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + +#ifdef H5_HAVE_CONCURRENCY + /* Acquire the mutex protecting the factory */ + if (H5TS_dlftt_mutex_acquire(&factory->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock factory's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* Verify that all the blocks have been freed */ if (factory->allocated > 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "factory still has objects allocated"); @@ -2012,6 +2662,20 @@ H5FL_fac_term(H5FL_fac_head_t *factory) tmp->list->prev_gc = NULL; } /* end else */ +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the factory */ + if (H5TS_dlftt_mutex_release(&factory->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock factory's mutex"); + + /* Destroy the mutex protecting this factory */ + if (H5TS_dlftt_mutex_destroy(&factory->mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't destroy factory's mutex"); + + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_fac_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + /* Free factory info */ factory = H5FL_FREE(H5FL_fac_head_t, factory); @@ -2034,28 +2698,40 @@ H5FL__fac_term_all(void) { FUNC_ENTER_PACKAGE_NOERR - /* Free the nodes on the garbage collection list */ - while (H5FL_fac_gc_head.first != NULL) { - H5FL_fac_gc_node_t *tmp; /* Temporary pointer to a garbage collection node */ +#ifdef H5_HAVE_CONCURRENCY + /* Have the array free lists been initialized? 
*/ + if (H5FL_fac_gc_head.init) { +#endif /* H5_HAVE_CONCURRENCY */ - tmp = H5FL_fac_gc_head.first->next; + /* Free the nodes on the garbage collection list */ + while (H5FL_fac_gc_head.first != NULL) { + H5FL_fac_gc_node_t *tmp; /* Temporary pointer to a garbage collection node */ + + tmp = H5FL_fac_gc_head.first->next; #ifdef H5FL_DEBUG - printf("%s: head->size = %d, head->allocated = %d\n", __func__, - (int)H5FL_fac_gc_head.first->list->size, (int)H5FL_fac_gc_head.first->list->allocated); + printf("%s: head->size = %d, head->allocated = %d\n", __func__, + (int)H5FL_fac_gc_head.first->list->size, (int)H5FL_fac_gc_head.first->list->allocated); #endif /* H5FL_DEBUG */ - /* The list cannot have any allocations outstanding */ - assert(H5FL_fac_gc_head.first->list->allocated == 0); + /* The list cannot have any allocations outstanding */ + assert(H5FL_fac_gc_head.first->list->allocated == 0); + + /* Free the node from the garbage collection list */ + H5FL_fac_gc_head.first = H5FL_FREE(H5FL_fac_gc_node_t, H5FL_fac_gc_head.first); - /* Reset the "initialized" flag, in case we restart this list somehow (I don't know how..) */ - H5FL_fac_gc_head.first->list->init = false; + H5FL_fac_gc_head.first = tmp; + } /* end while */ - /* Free the node from the garbage collection list */ - H5FL_fac_gc_head.first = H5FL_FREE(H5FL_fac_gc_node_t, H5FL_fac_gc_head.first); +#ifdef H5_HAVE_CONCURRENCY + /* Destroy concurrency objects */ + H5TS_dlftt_mutex_destroy(&H5FL_fac_gc_head.mutex); + H5TS_ATOMIC_DESTROY_SIZE_T(&H5FL_fac_gc_head.mem_freed); - H5FL_fac_gc_head.first = tmp; - } /* end while */ + /* Reset init flag */ + H5FL_fac_gc_head.init = false; + } +#endif /* H5_HAVE_CONCURRENCY */ FUNC_LEAVE_NOAPI(0) } /* end H5FL__fac_term_all() */ @@ -2077,6 +2753,9 @@ H5FL_garbage_coll(void) FUNC_ENTER_NOAPI(FAIL) + /* It's possible that 2+ threads could race and garbage collect, but */ + /* that's OK, on the rare occasions it happens */ + /* Garbage collect the free lists for array objects */ if (H5FL__arr_gc() < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGC, FAIL, "can't garbage collect array objects"); @@ -2125,27 +2804,36 @@ herr_t H5FL_set_free_list_limits(int reg_global_lim, int reg_list_lim, int arr_global_lim, int arr_list_lim, int blk_global_lim, int blk_list_lim, int fac_global_lim, int fac_list_lim) { + size_t lim; herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOERR /* Set the limit variables */ /* limit on all regular free lists */ - H5FL_reg_glb_mem_lim = (reg_global_lim == -1 ? UINT_MAX : (size_t)reg_global_lim); + lim = (reg_global_lim == -1 ? UINT_MAX : (size_t)reg_global_lim); + H5TS_ATOMIC_STORE_SIZE_T(&H5FL_reg_glb_mem_lim, lim); /* limit on each regular free list */ - H5FL_reg_lst_mem_lim = (reg_list_lim == -1 ? UINT_MAX : (size_t)reg_list_lim); + lim = (reg_list_lim == -1 ? UINT_MAX : (size_t)reg_list_lim); + H5TS_ATOMIC_STORE_SIZE_T(&H5FL_reg_lst_mem_lim, lim); /* limit on all array free lists */ - H5FL_arr_glb_mem_lim = (arr_global_lim == -1 ? UINT_MAX : (size_t)arr_global_lim); + lim = (arr_global_lim == -1 ? UINT_MAX : (size_t)arr_global_lim); + H5TS_ATOMIC_STORE_SIZE_T(&H5FL_arr_glb_mem_lim, lim); /* limit on each array free list */ - H5FL_arr_lst_mem_lim = (arr_list_lim == -1 ? UINT_MAX : (size_t)arr_list_lim); + lim = (arr_list_lim == -1 ? UINT_MAX : (size_t)arr_list_lim); + H5TS_ATOMIC_STORE_SIZE_T(&H5FL_arr_lst_mem_lim, lim); /* limit on all block free lists */ - H5FL_blk_glb_mem_lim = (blk_global_lim == -1 ? UINT_MAX : (size_t)blk_global_lim); + lim = (blk_global_lim == -1 ? 
UINT_MAX : (size_t)blk_global_lim); + H5TS_ATOMIC_STORE_SIZE_T(&H5FL_blk_glb_mem_lim, lim); /* limit on each block free list */ - H5FL_blk_lst_mem_lim = (blk_list_lim == -1 ? UINT_MAX : (size_t)blk_list_lim); + lim = (blk_list_lim == -1 ? UINT_MAX : (size_t)blk_list_lim); + H5TS_ATOMIC_STORE_SIZE_T(&H5FL_blk_lst_mem_lim, lim); /* limit on all factory free lists */ - H5FL_fac_glb_mem_lim = (fac_global_lim == -1 ? UINT_MAX : (size_t)fac_global_lim); + lim = (fac_global_lim == -1 ? UINT_MAX : (size_t)fac_global_lim); + H5TS_ATOMIC_STORE_SIZE_T(&H5FL_fac_glb_mem_lim, lim); /* limit on each factory free list */ - H5FL_fac_lst_mem_lim = (fac_list_lim == -1 ? UINT_MAX : (size_t)fac_list_lim); + lim = (fac_list_lim == -1 ? UINT_MAX : (size_t)fac_list_lim); + H5TS_ATOMIC_STORE_SIZE_T(&H5FL_fac_lst_mem_lim, lim); FUNC_LEAVE_NOAPI(ret_value) } /* end H5FL_set_free_list_limits() */ @@ -2172,99 +2860,160 @@ H5FL_set_free_list_limits(int reg_global_lim, int reg_list_lim, int arr_global_l herr_t H5FL_get_free_list_sizes(size_t *reg_size, size_t *arr_size, size_t *blk_size, size_t *fac_size) { + herr_t ret_value = SUCCEED; /* Return value*/ + +#ifdef H5_HAVE_CONCURRENCY + FUNC_ENTER_NOAPI(FAIL) +#else /* H5_HAVE_CONCURRENCY */ FUNC_ENTER_NOAPI_NOERR +#endif /* H5_HAVE_CONCURRENCY */ /* Retrieve the amount of "regular" memory used */ if (reg_size) { H5FL_reg_gc_node_t *gc_node; /* Pointer into the list of lists */ - /* Walk through all the free lists, counting the amount of memory */ - *reg_size = 0; - gc_node = H5FL_reg_gc_head.first; - while (gc_node != NULL) { - H5FL_reg_head_t *reg_list = gc_node->list; /* Head of list */ +#ifdef H5_HAVE_CONCURRENCY + if (H5FL_reg_gc_head.init) { + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_reg_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ - /* Sanity check */ - assert(reg_list->init); + /* Walk through all the free lists, counting the amount of memory */ + *reg_size = 0; + gc_node = H5FL_reg_gc_head.first; + while (gc_node != NULL) { + H5FL_reg_head_t *reg_list = gc_node->list; /* Head of list */ - /* Add the amount of memory for this list */ - *reg_size += (reg_list->size * reg_list->allocated); + /* Sanity check */ + assert(H5_GLOBAL_IS_INIT(reg_list)); - /* Go on to the next free list */ - gc_node = gc_node->next; - } /* end while */ - } /* end if */ + /* Add the amount of memory for this list */ + *reg_size += (reg_list->size * reg_list->allocated); + + /* Go on to the next free list */ + gc_node = gc_node->next; + } /* end while */ +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_reg_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); + } +#endif /* H5_HAVE_CONCURRENCY */ + } /* end if */ /* Retrieve the amount of "array" memory used */ if (arr_size) { H5FL_gc_arr_node_t *gc_arr_node; /* Pointer into the list of things to garbage collect */ - /* Walk through all the free lists, counting the amount of memory */ - *arr_size = 0; - gc_arr_node = H5FL_arr_gc_head.first; - while (gc_arr_node != NULL) { - H5FL_arr_head_t *head = gc_arr_node->list; /* Head of array list elements */ - - /* Sanity check */ - assert(head->init); - - /* Check for any allocated elements in this list */ - if (head->allocated > 0) { - unsigned u; - - /* Walk through the free lists for array sizes */ - for (u = 0; u < 
(unsigned)head->maxelem; u++) - /* Add the amount of memory for this size */ - *arr_size += head->list_arr[u].allocated * head->list_arr[u].size; - } /* end if */ +#ifdef H5_HAVE_CONCURRENCY + if (H5FL_arr_gc_head.init) { + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_arr_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Walk through all the free lists, counting the amount of memory */ + *arr_size = 0; + gc_arr_node = H5FL_arr_gc_head.first; + while (gc_arr_node != NULL) { + H5FL_arr_head_t *head = gc_arr_node->list; /* Head of array list elements */ + + /* Sanity check */ + assert(H5_GLOBAL_IS_INIT(head)); + + /* Check for any allocated elements in this list */ + if (head->allocated > 0) { + unsigned u; + + /* Walk through the free lists for array sizes */ + for (u = 0; u < (unsigned)head->maxelem; u++) + /* Add the amount of memory for this size */ + *arr_size += head->list_arr[u].allocated * head->list_arr[u].size; + } /* end if */ - /* Go on to the next free list */ - gc_arr_node = gc_arr_node->next; - } /* end while */ - } /* end if */ + /* Go on to the next free list */ + gc_arr_node = gc_arr_node->next; + } /* end while */ +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_arr_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); + } +#endif /* H5_HAVE_CONCURRENCY */ + } /* end if */ /* Retrieve the amount of "block" memory used */ if (blk_size) { H5FL_blk_gc_node_t *gc_blk_node; /* Pointer into the list of things */ - /* Walk through all the free lists */ - gc_blk_node = H5FL_blk_gc_head.first; - *blk_size = 0; - while (gc_blk_node != NULL) { - H5FL_blk_node_t *blk_head; /* Temp. ptr to the free list block node */ - - /* Loop through all the nodes in the block free list queue */ - blk_head = gc_blk_node->pq->head; - while (blk_head != NULL) { - /* Add size of blocks on this list */ - *blk_size += (blk_head->allocated * blk_head->size); - - /* Get pointer to next node */ - blk_head = blk_head->next; +#ifdef H5_HAVE_CONCURRENCY + if (H5FL_blk_gc_head.init) { + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_blk_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ + + /* Walk through all the free lists */ + gc_blk_node = H5FL_blk_gc_head.first; + *blk_size = 0; + while (gc_blk_node != NULL) { + H5FL_blk_node_t *blk_head; /* Temp. 
ptr to the free list block node */ + + /* Loop through all the nodes in the block free list queue */ + blk_head = gc_blk_node->pq->pq; + while (blk_head != NULL) { + /* Add size of blocks on this list */ + *blk_size += (blk_head->allocated * blk_head->size); + + /* Get pointer to next node */ + blk_head = blk_head->next; + } /* end while */ + + /* Go on to the next free list */ + gc_blk_node = gc_blk_node->next; } /* end while */ - - /* Go on to the next free list */ - gc_blk_node = gc_blk_node->next; - } /* end while */ - } /* end if */ +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_blk_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); + } +#endif /* H5_HAVE_CONCURRENCY */ + } /* end if */ /* Retrieve the amount of "factory" memory used */ if (fac_size) { H5FL_fac_gc_node_t *gc_fac_node; /* Pointer into the list of things to garbage collect */ - /* Walk through all the free lists */ - gc_fac_node = H5FL_fac_gc_head.first; - *fac_size = 0; - while (gc_fac_node != NULL) { - H5FL_fac_head_t *fac_head = gc_fac_node->list; /* Head node for factory list */ +#ifdef H5_HAVE_CONCURRENCY + if (H5FL_fac_gc_head.init) { + /* Acquire the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_acquire(&H5FL_fac_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTLOCK, FAIL, "can't lock list of list's mutex"); +#endif /* H5_HAVE_CONCURRENCY */ - /* Add size of blocks on this list */ - *fac_size += (fac_head->allocated * fac_head->size); + /* Walk through all the free lists */ + gc_fac_node = H5FL_fac_gc_head.first; + *fac_size = 0; + while (gc_fac_node != NULL) { + H5FL_fac_head_t *fac_head = gc_fac_node->list; /* Head node for factory list */ - /* Go on to the next free list to garbage collect */ - gc_fac_node = gc_fac_node->next; - } /* end while */ - } /* end if */ + /* Add size of blocks on this list */ + *fac_size += (fac_head->allocated * fac_head->size); - FUNC_LEAVE_NOAPI(SUCCEED) + /* Go on to the next free list to garbage collect */ + gc_fac_node = gc_fac_node->next; + } /* end while */ +#ifdef H5_HAVE_CONCURRENCY + /* Release the mutex protecting the list of lists */ + if (H5TS_dlftt_mutex_release(&H5FL_fac_gc_head.mutex) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTUNLOCK, FAIL, "can't unlock list of list's mutex"); + } +#endif /* H5_HAVE_CONCURRENCY */ + } /* end if */ + +#ifdef H5_HAVE_CONCURRENCY +done: +#endif /* H5_HAVE_CONCURRENCY */ + FUNC_LEAVE_NOAPI(ret_value) } /* end H5FL_get_free_list_sizes() */ diff --git a/src/H5FLprivate.h b/src/H5FLprivate.h index c9f79b3c7da..c42bf8fa290 100644 --- a/src/H5FLprivate.h +++ b/src/H5FLprivate.h @@ -24,6 +24,7 @@ /* Public headers needed by this file */ /* Private headers needed by this file */ +#include "H5TSprivate.h" /* Threadsafety */ /* Macros for turning off free lists in the library */ /*#define H5_NO_FREE_LISTS*/ @@ -46,7 +47,11 @@ typedef struct H5FL_reg_node_t { /* Data structure for free list of blocks */ typedef struct H5FL_reg_head_t { - bool init; /* Whether the free list has been initialized */ + H5_global_t dlcp_info; /* Information for init */ + /* (MUST be first field in structure) */ +#ifdef H5_HAVE_CONCURRENCY + H5TS_dlftt_mutex_t mutex; /* Guard access to this free list */ +#endif /* H5_HAVE_CONCURRENCY */ unsigned allocated; /* Number of blocks allocated */ unsigned onlist; /* Number of blocks on free list */ const char *name; /* Name of the type */ @@ -60,7 +65,7 @@ typedef struct 
H5FL_reg_head_t { #define H5FL_REG_NAME(t) H5_##t##_reg_free_list #ifndef H5_NO_REG_FREE_LISTS /* Common macros for H5FL_DEFINE & H5FL_DEFINE_STATIC */ -#define H5FL_DEFINE_COMMON(t) H5FL_reg_head_t H5FL_REG_NAME(t) = {0, 0, 0, #t, sizeof(t), NULL} +#define H5FL_DEFINE_COMMON(t) H5FL_reg_head_t H5FL_REG_NAME(t) = {.name = #t, .size = sizeof(t)} /* Declare a free list to manage objects of type 't' */ #define H5FL_DEFINE(t) H5_DLL H5FL_DEFINE_COMMON(t) @@ -117,12 +122,16 @@ typedef struct H5FL_blk_node_t { /* Data structure for priority queue of native block free lists */ typedef struct H5FL_blk_head_t { - bool init; /* Whether the free list has been initialized */ + H5_global_t dlcp_info; /* Information for init */ + /* (MUST be first field in structure) */ +#ifdef H5_HAVE_CONCURRENCY + H5TS_dlftt_mutex_t mutex; /* Guard access to this free list */ +#endif /* H5_HAVE_CONCURRENCY */ unsigned allocated; /* Total number of blocks allocated */ unsigned onlist; /* Total number of blocks on free list */ size_t list_mem; /* Total amount of memory in blocks on free list */ const char *name; /* Name of the type */ - H5FL_blk_node_t *head; /* Pointer to first free list in queue */ + H5FL_blk_node_t *pq; /* Pointer to first free list in queue */ } H5FL_blk_head_t; /* @@ -131,7 +140,7 @@ typedef struct H5FL_blk_head_t { #define H5FL_BLK_NAME(t) H5_##t##_blk_free_list #ifndef H5_NO_BLK_FREE_LISTS /* Common macro for H5FL_BLK_DEFINE & H5FL_BLK_DEFINE_STATIC */ -#define H5FL_BLK_DEFINE_COMMON(t) H5FL_blk_head_t H5FL_BLK_NAME(t) = {0, 0, 0, 0, #t "_blk", NULL} +#define H5FL_BLK_DEFINE_COMMON(t) H5FL_blk_head_t H5FL_BLK_NAME(t) = {.name = #t "_blk"} /* Declare a free list to manage objects of type 't' */ #define H5FL_BLK_DEFINE(t) H5_DLL H5FL_BLK_DEFINE_COMMON(t) @@ -190,7 +199,11 @@ typedef struct H5FL_arr_node_t { /* Data structure for free list of array blocks */ typedef struct H5FL_arr_head_t { - bool init; /* Whether the free list has been initialized */ + H5_global_t dlcp_info; /* Information for init */ + /* (MUST be first field in structure) */ +#ifdef H5_HAVE_CONCURRENCY + H5TS_dlftt_mutex_t mutex; /* Guard access to this free list */ +#endif /* H5_HAVE_CONCURRENCY */ unsigned allocated; /* Total number of blocks allocated */ size_t list_mem; /* Amount of memory in block on free list */ const char *name; /* Name of the type */ @@ -207,7 +220,8 @@ typedef struct H5FL_arr_head_t { #ifndef H5_NO_ARR_FREE_LISTS /* Common macro for H5FL_ARR_DEFINE & H5FL_ARR_DEFINE_STATIC (and H5FL_BARR variants) */ #define H5FL_ARR_DEFINE_COMMON(b, t, m) \ - H5FL_arr_head_t H5FL_ARR_NAME(t) = {0, 0, 0, #t "_arr", m + 1, b, sizeof(t), NULL} + H5FL_arr_head_t H5FL_ARR_NAME(t) = { \ + .name = #t "_arr", .maxelem = m + 1, .base_size = b, .elem_size = sizeof(t)} /* Declare a free list to manage arrays of type 't' */ #define H5FL_ARR_DEFINE(t, m) H5_DLL H5FL_ARR_DEFINE_COMMON(0, t, m) @@ -266,8 +280,7 @@ typedef struct H5FL_seq_head_t { #define H5FL_SEQ_NAME(t) H5_##t##_seq_free_list #ifndef H5_NO_SEQ_FREE_LISTS /* Common macro for H5FL_SEQ_DEFINE & H5FL_SEQ_DEFINE_STATIC */ -#define H5FL_SEQ_DEFINE_COMMON(t) \ - H5FL_seq_head_t H5FL_SEQ_NAME(t) = {{0, 0, 0, 0, #t "_seq", NULL}, sizeof(t)} +#define H5FL_SEQ_DEFINE_COMMON(t) H5FL_seq_head_t H5FL_SEQ_NAME(t) = {{.name = #t "_seq"}, .size = sizeof(t)} /* Declare a free list to manage sequences of type 't' */ #define H5FL_SEQ_DEFINE(t) H5_DLL H5FL_SEQ_DEFINE_COMMON(t) @@ -309,7 +322,9 @@ typedef struct H5FL_fac_node_t H5FL_fac_node_t; /* Data structure for free list block 
factory */ typedef struct H5FL_fac_head_t { - bool init; /* Whether the free list has been initialized */ +#ifdef H5_HAVE_CONCURRENCY + H5TS_dlftt_mutex_t mutex; /* Guard access to this factory */ +#endif /* H5_HAVE_CONCURRENCY */ unsigned allocated; /* Number of blocks allocated */ unsigned onlist; /* Number of blocks on free list */ size_t size; /* Size of the blocks in the list */ @@ -375,6 +390,9 @@ H5_DLL void *H5FL_fac_free(H5FL_fac_head_t *head, void *obj); H5_DLL herr_t H5FL_fac_term(H5FL_fac_head_t *head); /* General free list routines */ +#ifdef H5_HAVE_CONCURRENCY +H5_DLL herr_t H5FL_init(void); +#endif /* H5_HAVE_CONCURRENCY */ H5_DLL herr_t H5FL_garbage_coll(void); H5_DLL herr_t H5FL_set_free_list_limits(int reg_global_lim, int reg_list_lim, int arr_global_lim, int arr_list_lim, int blk_global_lim, int blk_list_lim, diff --git a/src/H5Ocache.c b/src/H5Ocache.c index 59d1d8d31b2..1a40972bce9 100644 --- a/src/H5Ocache.c +++ b/src/H5Ocache.c @@ -1450,38 +1450,6 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't decode refcount"); oh->nlink = *refcount; } - /* Check if message is an old mtime message */ - else if (H5O_MTIME_ID == id) { - time_t *mtime = NULL; - - /* Decode mtime message */ - mtime = - (time_t *)(H5O_MSG_MTIME->decode)(udata->f, NULL, 0, &ioflags, mesg->raw_size, mesg->raw); - - /* Save the decoded old format mtime */ - if (!mtime) - HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, FAIL, "can't decode old format mtime"); - - /* Save 'native' form of mtime message and its value */ - mesg->native = mtime; - oh->ctime = *mtime; - } - /* Check if message is an new mtime message */ - else if (H5O_MTIME_NEW_ID == id) { - time_t *mtime = NULL; - - /* Decode mtime message */ - mtime = (time_t *)(H5O_MSG_MTIME_NEW->decode)(udata->f, NULL, 0, &ioflags, mesg->raw_size, - mesg->raw); - - /* Save the decoded new format mtime */ - if (!mtime) - HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, FAIL, "can't decode new format mtime"); - - /* Save 'native' form of mtime message and its value */ - mesg->native = mtime; - oh->ctime = *mtime; - } /* Check if message is a link message */ else if (H5O_LINK_ID == id) { /* Increment the count of link messages */ diff --git a/src/H5Oint.c b/src/H5Oint.c index e79012c1182..e894df9037d 100644 --- a/src/H5Oint.c +++ b/src/H5Oint.c @@ -1041,7 +1041,6 @@ H5O_protect(const H5O_loc_t *loc, unsigned prot_flags, bool pin_all_chunks) curr_msg = 0; while (curr_msg < cont_msg_info.nmsgs) { H5O_chunk_proxy_t *chk_proxy; /* Proxy for chunk, to bring it into memory */ - unsigned chunkno; /* Chunk number for chunk proxy */ size_t chkcnt = oh->nchunks; /* Count of chunks (for sanity checking) */ /* Bring the chunk into the cache */ @@ -1056,14 +1055,12 @@ H5O_protect(const H5O_loc_t *loc, unsigned prot_flags, bool pin_all_chunks) /* Sanity check */ assert(chk_proxy->oh == oh); - chunkno = chk_proxy->chunkno; - /* Release the chunk from the cache */ if (H5AC_unprotect(loc->file, H5AC_OHDR_CHK, cont_msg_info.msgs[curr_msg].addr, chk_proxy, H5AC__NO_FLAGS_SET) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, NULL, "unable to release object header chunk"); - if (chunkno != chkcnt) + if (chk_proxy->chunkno != chkcnt) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "incorrect chunk number for object header chunk"); if (oh->nchunks != (chkcnt + 1)) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, @@ -2168,7 +2165,7 @@ H5O_get_info(const H5O_loc_t *loc, H5O_info2_t *oinfo, unsigned fields) HGOTO_ERROR(H5E_OHDR, 
H5E_NOTFOUND, FAIL, "unable to check for MTIME message"); if (exists > 0) { /* Get "old style" modification time info */ - if (NULL == H5O_msg_read_oh(loc->file, oh, H5O_MTIME_ID, &oinfo->ctime)) + if (NULL == H5O_msg_read_oh(loc->file, oh, H5O_MTIME_ID, &oh->ctime)) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't read MTIME message"); } /* end if */ else { @@ -2177,14 +2174,17 @@ H5O_get_info(const H5O_loc_t *loc, H5O_info2_t *oinfo, unsigned fields) HGOTO_ERROR(H5E_OHDR, H5E_NOTFOUND, FAIL, "unable to check for MTIME_NEW message"); if (exists > 0) { /* Get "new style" modification time info */ - if (NULL == H5O_msg_read_oh(loc->file, oh, H5O_MTIME_NEW_ID, &oinfo->ctime)) + if (NULL == H5O_msg_read_oh(loc->file, oh, H5O_MTIME_NEW_ID, &oh->ctime)) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't read MTIME_NEW message"); } /* end if */ else - oinfo->ctime = 0; + oh->ctime = 0; } /* end else */ - } /* end else */ - } /* end if */ + + /* Get ctime field */ + oinfo->ctime = oh->ctime; + } /* end else */ + } /* end if */ /* Retrieve # of attributes */ if (fields & H5O_INFO_NUM_ATTRS) diff --git a/src/H5TSatomic.c b/src/H5TSatomic.c index db241b2b868..bf16f725ec2 100644 --- a/src/H5TSatomic.c +++ b/src/H5TSatomic.c @@ -158,6 +158,57 @@ H5TS_atomic_destroy_uint(H5TS_atomic_uint_t *obj) FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY } /* end H5TS_atomic_destroy_uint() */ +/*-------------------------------------------------------------------------- + * Function: H5TS_atomic_init_size_t + * + * Purpose: Initializes an atomic 'size_t' variable object with a value. + * + * Note: Per the C11 standard, this function is not atomic and + * concurrent execution from multiple threads is a data race. + * + * Return: None + * + *-------------------------------------------------------------------------- + */ +void +H5TS_atomic_init_size_t(H5TS_atomic_size_t *obj, size_t desired) +{ + FUNC_ENTER_NOAPI_NAMECHECK_ONLY + + /* Initialize mutex that protects the "atomic" value */ + (void) + H5TS_mutex_init(&obj->mutex, H5TS_MUTEX_TYPE_PLAIN); + + /* Set the value */ + obj->value = desired; + + FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY +} /* end H5TS_atomic_init_size_t() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS_atomic_destroy_size_t + * + * Purpose: Destroys / releases resources for an atomic 'size_t' variable + * + * Note: No equivalent in the C11 atomics, but needed here, to destroy + * the mutex used to protect the atomic value. + * + * Return: None + * + *-------------------------------------------------------------------------- + */ +void +H5TS_atomic_destroy_size_t(H5TS_atomic_size_t *obj) +{ + FUNC_ENTER_NOAPI_NAMECHECK_ONLY + + /* Destroy mutex that protects the "atomic" value */ + (void) + H5TS_mutex_destroy(&obj->mutex); + + FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY +} /* end H5TS_atomic_destroy_size_t() */ + /*-------------------------------------------------------------------------- * Function: H5TS_atomic_init_voidp * diff --git a/src/H5TSatomic.h b/src/H5TSatomic.h index 933bf1a11f7..f0d57b7d758 100644 --- a/src/H5TSatomic.h +++ b/src/H5TSatomic.h @@ -277,6 +277,118 @@ H5TS_atomic_fetch_sub_uint(H5TS_atomic_uint_t *obj, unsigned arg) return ret_value; } /* end H5TS_atomic_fetch_sub_uint() */ +/*-------------------------------------------------------------------------- + * Function: H5TS_atomic_load_size_t + * + * Purpose: Retrieves the value of atomic 'size_t' variable object. 
+ * + * Return: Value of the atomic 'size_t' + * + *-------------------------------------------------------------------------- + */ +static inline size_t +H5TS_atomic_load_size_t(H5TS_atomic_size_t *obj) +{ + size_t ret_value; + + /* Lock mutex that protects the "atomic" value */ + H5TS_mutex_lock(&obj->mutex); + + /* Get the value */ + ret_value = obj->value; + + /* Release the object's mutex */ + H5TS_mutex_unlock(&obj->mutex); + + return ret_value; +} /* end H5TS_atomic_load_size_t() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS_atomic_store_size_t + * + * Purpose: Atomically replaces the value of the atomic 'size_t' variable + * + * Return: None + * + *-------------------------------------------------------------------------- + */ +static inline void +H5TS_atomic_store_size_t(H5TS_atomic_size_t *obj, size_t desired) +{ + /* Lock mutex that protects the "atomic" value */ + H5TS_mutex_lock(&obj->mutex); + + /* Set the value */ + obj->value = desired; + + /* Release the object's mutex */ + H5TS_mutex_unlock(&obj->mutex); + + return; +} /* end H5TS_atomic_store_size_t() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS_atomic_fetch_add_size_t + * + * Purpose: Atomically replaces the value of an atomic 'size_t' variable with the + * result of addition of the 'arg' to the old value of the + * atomic variable. + * + * Return: Returns the value of the atomic variable held previously + * + *-------------------------------------------------------------------------- + */ +static inline size_t +H5TS_atomic_fetch_add_size_t(H5TS_atomic_size_t *obj, size_t arg) +{ + size_t ret_value; + + /* Lock mutex that protects the "atomic" value */ + H5TS_mutex_lock(&obj->mutex); + + /* Get the current value */ + ret_value = obj->value; + + /* Increment the value */ + obj->value += arg; + + /* Release the object's mutex */ + H5TS_mutex_unlock(&obj->mutex); + + return ret_value; +} /* end H5TS_atomic_fetch_add_size_t() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS_atomic_fetch_sub_size_t + * + * Purpose: Atomically replaces the value of an atomic 'size_t' variable with the + * result of subtracting the 'arg' from the old value of the + * atomic variable. + * + * Return: Returns the value of the atomic variable held previously + * + *-------------------------------------------------------------------------- + */ +static inline size_t +H5TS_atomic_fetch_sub_size_t(H5TS_atomic_size_t *obj, size_t arg) +{ + size_t ret_value; + + /* Lock mutex that protects the "atomic" value */ + H5TS_mutex_lock(&obj->mutex); + + /* Get the current value */ + ret_value = obj->value; + + /* Decrement the value */ + obj->value -= arg; + + /* Release the object's mutex */ + H5TS_mutex_unlock(&obj->mutex); + + return ret_value; +} /* end H5TS_atomic_fetch_sub_size_t() */ + /*-------------------------------------------------------------------------- * Function: H5TS_atomic_exchange_voidp * diff --git a/src/H5TSdlftt_mutex.c b/src/H5TSdlftt_mutex.c new file mode 100644 index 00000000000..14bca84a7b2 --- /dev/null +++ b/src/H5TSdlftt_mutex.c @@ -0,0 +1,110 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: This file contains support for mutex locks, equivalent to the + * pthread 'pthread_mutex_t' type and capabilities, but efficiently + * obeying the "DLFFT" locking protocol. + * + * Note: Because this threadsafety framework operates outside the library, + * it does not use the error stack (although it does use error macros + * that don't push errors on a stack) and only uses the "namecheck only" + * FUNC_ENTER_* / FUNC_LEAVE_* macros. + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5TSmodule.h" /* This source code file is part of the H5TS module */ + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5TSpkg.h" /* Threadsafety */ + +#ifdef H5_HAVE_THREADS + +/****************/ +/* Local Macros */ +/****************/ + +/******************/ +/* Local Typedefs */ +/******************/ + +/********************/ +/* Local Prototypes */ +/********************/ + +/*********************/ +/* Package Variables */ +/*********************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/*******************/ +/* Local Variables */ +/*******************/ + +/*------------------------------------------------------------------------- + * Function: H5TS_dlftt_mutex_init + * + * Purpose: Initialize a H5TS_dlftt_mutex_t (does not allocate it) + * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5TS_dlftt_mutex_init(H5TS_dlftt_mutex_t *mutex) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NAMECHECK_ONLY + + if (H5_UNLIKELY(H5TS_mutex_init(&mutex->mtx, H5TS_MUTEX_TYPE_PLAIN)) < 0) + HGOTO_DONE(FAIL); + mutex->dlftt = 0; + +done: + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} /* end H5TS_dlftt_mutex_init() */ + +/*------------------------------------------------------------------------- + * Function: H5TS_dlftt_mutex_destroy + * + * Purpose: Destroy a H5TS_dlftt_mutex_t (does not free it) + * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5TS_dlftt_mutex_destroy(H5TS_dlftt_mutex_t *mutex) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NAMECHECK_ONLY + + if (H5_UNLIKELY(H5TS_mutex_destroy(&mutex->mtx) < 0)) + HGOTO_DONE(FAIL); + +done: + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} /* end H5TS_dlftt_mutex_destroy() */ + +#endif /* H5_HAVE_THREADS */ diff --git a/src/H5TSdlftt_mutex.h b/src/H5TSdlftt_mutex.h new file mode 100644 index 00000000000..2537ebfd422 --- /dev/null +++ b/src/H5TSdlftt_mutex.h @@ -0,0 +1,106 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: This file contains support for mutex locks, equivalent to the + * pthread 'pthread_mutex_t' type and capabilities, but efficiently + * obeying the "DLFFT" locking protocol. + * + * Note: Because this threadsafety framework operates outside the library, + * it does not use the error stack (although it does use error macros + * that don't push errors on a stack) and only uses the "namecheck only" + * FUNC_ENTER_* / FUNC_LEAVE_* macros. + */ + +/****************/ +/* Module Setup */ +/****************/ + +/***********/ +/* Headers */ +/***********/ + +#ifndef H5TS__get_dlftt_DEF +#define H5TS__get_dlftt_DEF +/* Declare this routine here also, to avoid including package header */ +H5_DLL herr_t H5TS__get_dlftt(unsigned *dlftt); +#endif /* H5TS__get_dlftt_DEF */ + +/****************/ +/* Local Macros */ +/****************/ + +/******************/ +/* Local Typedefs */ +/******************/ + +/********************/ +/* Local Prototypes */ +/********************/ + +/*********************/ +/* Package Variables */ +/*********************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/*******************/ +/* Local Variables */ +/*******************/ + +/*-------------------------------------------------------------------------- + * Function: H5TS_dlftt_mutex_acquire + * + * Purpose: Acquires the lock on a mutex, obeying the "DLFTT" protocol + * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +static inline herr_t +H5TS_dlftt_mutex_acquire(H5TS_dlftt_mutex_t *mtx) +{ + /* Query the DLFTT value */ + if (H5_UNLIKELY(H5TS__get_dlftt(&mtx->dlftt) < 0)) + return FAIL; + + /* Don't acquire the mutex if locking is disabled */ + if (0 == mtx->dlftt) + /* Acquire the mutex */ + if (H5_UNLIKELY(H5TS_mutex_lock(&mtx->mtx) < 0)) + return FAIL; + + return SUCCEED; +} /* end H5TS_dlftt_mutex_acquire() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS_dlftt_mutex_release + * + * Purpose: Releases the lock on a mutex, obeying the "DLFTT" protocol + * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +static inline herr_t +H5TS_dlftt_mutex_release(H5TS_dlftt_mutex_t *mtx) +{ + /* Don't release the mutex if locking is disabled */ + if (0 == mtx->dlftt) + /* Release the mutex */ + if (H5_UNLIKELY(H5TS_mutex_unlock(&mtx->mtx) < 0)) + return FAIL; + + return SUCCEED; +} /* end H5TS_dlftt_mutex_release() */ diff --git a/src/H5TSint.c b/src/H5TSint.c index 7edc46f5ada..b803891de50 100644 --- a/src/H5TSint.c +++ b/src/H5TSint.c @@ -78,7 +78,6 @@ typedef struct H5TS_tinfo_node_t { /********************/ static H5TS_tinfo_node_t *H5TS__tinfo_create(void); #ifdef H5_HAVE_CONCURRENCY -static herr_t H5TS__get_dlftt(unsigned *dlftt); static herr_t H5TS__set_dlftt(unsigned dlftt); static herr_t H5TS__inc_dlftt(void); static herr_t H5TS__dec_dlftt(void); @@ -112,6 +111,11 @@ 
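/*
 * Illustrative sketch (not part of this patch): H5TS_dlftt_mutex_acquire() /
 * H5TS_dlftt_mutex_release() above read the calling thread's DLFTT count via
 * H5TS__get_dlftt() and skip the underlying lock while that count is nonzero,
 * i.e. while locking has been disabled for this thread.  A standalone analogue
 * using a C11 _Thread_local counter in place of H5TS__get_dlftt() is shown
 * below; the demo_* names are illustrative only.  As in the patch, this only
 * works if a thread raises its count during phases when no other thread is
 * contending for the same mutex.
 */
#include <pthread.h>

static _Thread_local unsigned demo_dlftt = 0; /* >0: skip locking on this thread */

typedef struct {
    pthread_mutex_t mtx;
    unsigned        dlftt; /* Snapshot taken at acquire time, reused at release time */
} demo_dlftt_mutex_t;

static int
demo_dlftt_mutex_acquire(demo_dlftt_mutex_t *m)
{
    /* Remember the thread's current setting so release() makes the same choice */
    m->dlftt = demo_dlftt;

    if (0 == m->dlftt)
        return pthread_mutex_lock(&m->mtx);

    return 0;
}

static int
demo_dlftt_mutex_release(demo_dlftt_mutex_t *m)
{
    if (0 == m->dlftt)
        return pthread_mutex_unlock(&m->mtx);

    return 0;
}

/* Typical pattern:
 *     demo_dlftt_mutex_acquire(&m);
 *     ...touch the state the mutex protects...
 *     demo_dlftt_mutex_release(&m);
 */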
static uint64_t H5TS_next_thrd_id_s = 0; /* Mutex for access to H5TS_tinfo_next_free_s and H5TS_next_thrd_id_s */ static H5TS_mutex_t H5TS_tinfo_mtx_s; +#ifdef H5_HAVE_CONCURRENCY +/* "bootstrap" mutex for deferred initialization of global variables */ +H5TS_dlftt_mutex_t H5TS_bootstrap_mtx_g; +#endif /* H5_HAVE_CONCURRENCY */ + /*-------------------------------------------------------------------------- NAME H5TS__init_package -- Initialize interface-specific information @@ -466,6 +470,12 @@ H5TS__tinfo_init(void) if (H5_UNLIKELY(H5TS_mutex_init(&H5TS_tinfo_mtx_s, H5TS_MUTEX_TYPE_PLAIN)) < 0) ret_value = FAIL; +#ifdef H5_HAVE_CONCURRENCY + /* Initialize the mutex for initialization of global variables */ + if (H5_UNLIKELY(H5TS_dlftt_mutex_init(&H5TS_bootstrap_mtx_g)) < 0) + ret_value = FAIL; +#endif /* H5_HAVE_CONCURRENCY */ + /* Initialize key for thread-specific API contexts */ #ifdef H5_HAVE_WIN_THREADS if (H5_UNLIKELY(H5TS_key_create(&H5TS_thrd_info_key_g, NULL) < 0)) @@ -665,7 +675,7 @@ H5TS_get_err_stack(void) * *-------------------------------------------------------------------------- */ -static herr_t +herr_t H5TS__get_dlftt(unsigned *dlftt) { H5TS_tinfo_node_t *tinfo_node; @@ -872,6 +882,12 @@ H5TS__tinfo_term(void) if (H5_UNLIKELY(H5TS_mutex_unlock(&H5TS_tinfo_mtx_s) < 0)) HGOTO_DONE(FAIL); +#ifdef H5_HAVE_CONCURRENCY + /* Destroy mutex for bootstrapping global variables */ + if (H5_UNLIKELY(H5TS_dlftt_mutex_destroy(&H5TS_bootstrap_mtx_g) < 0)) + HGOTO_DONE(FAIL); +#endif /* H5_HAVE_CONCURRENCY */ + /* Release critical section / mutex for modifying the thread info globals */ if (H5_UNLIKELY(H5TS_mutex_destroy(&H5TS_tinfo_mtx_s) < 0)) HGOTO_DONE(FAIL); diff --git a/src/H5TSpkg.h b/src/H5TSpkg.h index 0b5ab28558c..e8617c0b65c 100644 --- a/src/H5TSpkg.h +++ b/src/H5TSpkg.h @@ -237,6 +237,10 @@ H5_DLL herr_t H5TS__api_mutex_release(unsigned *lock_count); H5_DLL herr_t H5TS__tinfo_init(void); H5_DLL void H5TS__tinfo_destroy(void *tinfo_node); H5_DLL herr_t H5TS__tinfo_term(void); +#ifndef H5TS__get_dlftt_DEF +#define H5TS__get_dlftt_DEF +H5_DLL herr_t H5TS__get_dlftt(unsigned *dlftt); +#endif /* H5TS__get_dlftt_DEF */ #endif /* H5_HAVE_THREADSAFE_API */ /* Recursive R/W lock related function declarations */ diff --git a/src/H5TSprivate.h b/src/H5TSprivate.h index 36a35de7823..fff9476f186 100644 --- a/src/H5TSprivate.h +++ b/src/H5TSprivate.h @@ -100,6 +100,14 @@ #define H5TS_atomic_fetch_sub_uint(obj, arg) atomic_fetch_sub((obj), (arg)) #define H5TS_atomic_destroy_uint(obj) /* void */ +/* atomic_size_t */ +#define H5TS_atomic_init_size_t(obj, desired) atomic_init((obj), (desired)) +#define H5TS_atomic_load_size_t(obj) atomic_load(obj) +#define H5TS_atomic_store_size_t(obj, desired) atomic_store((obj), (desired)) +#define H5TS_atomic_fetch_add_size_t(obj, arg) atomic_fetch_add((obj), (arg)) +#define H5TS_atomic_fetch_sub_size_t(obj, arg) atomic_fetch_sub((obj), (arg)) +#define H5TS_atomic_destroy_size_t(obj) /* void */ + /* atomic_voidp */ #define H5TS_atomic_init_voidp(obj, desired) atomic_init((obj), (desired)) #define H5TS_atomic_exchange_voidp(obj, desired) atomic_exchange((obj), (desired)) @@ -218,9 +226,10 @@ typedef void (*H5TS_once_init_func_t)(void); /* Atomics */ #if defined(H5_HAVE_STDATOMIC_H) && !defined(__cplusplus) -typedef atomic_int H5TS_atomic_int_t; -typedef atomic_uint H5TS_atomic_uint_t; -/* Suppress warning about _Atomic being a C11 extension */ +typedef atomic_int H5TS_atomic_int_t; +typedef atomic_uint H5TS_atomic_uint_t; +typedef atomic_size_t 
H5TS_atomic_size_t; +/* Suppress warning about _Atomic keyword not supported in C99 */ H5_WARN_C11_EXTENSIONS_OFF typedef void *_Atomic H5TS_atomic_voidp_t; H5_WARN_C11_EXTENSIONS_ON @@ -233,6 +242,10 @@ typedef struct { H5TS_mutex_t mutex; unsigned value; } H5TS_atomic_uint_t; +typedef struct { + H5TS_mutex_t mutex; + size_t value; +} H5TS_atomic_size_t; typedef struct { H5TS_mutex_t mutex; void *value; @@ -289,10 +302,19 @@ typedef struct H5TS_semaphore_t { typedef atomic_flag H5TS_spinlock_t; #endif +/* Mutex that efficiently obeys the "DLFTT" locking protocol */ +typedef struct H5TS_dlftt_mutex_t { + H5TS_mutex_t mtx; + unsigned dlftt; +} H5TS_dlftt_mutex_t; + /*****************************/ /* Library-private Variables */ /*****************************/ +/* Global "bootstrapping" mutex */ +extern H5TS_dlftt_mutex_t H5TS_bootstrap_mtx_g; + /***************************************/ /* Library-private Function Prototypes */ /***************************************/ @@ -331,6 +353,11 @@ H5_DLL herr_t H5TS_mutex_init(H5TS_mutex_t *mutex, int type); H5_DLL herr_t H5TS_mutex_trylock(H5TS_mutex_t *mutex, bool *acquired) H5TS_TRY_ACQUIRE(SUCCEED, *mutex); H5_DLL herr_t H5TS_mutex_destroy(H5TS_mutex_t *mutex); +/* "DLFTT" aware mutex operations */ +H5_DLL herr_t H5TS_dlftt_mutex_init(H5TS_dlftt_mutex_t *mutex); +/* DLFTT mutex lock & unlock calls are defined in H5TSdlftt_mutex.h */ +H5_DLL herr_t H5TS_dlftt_mutex_destroy(H5TS_dlftt_mutex_t *mutex); + /* R/W locks */ H5_DLL herr_t H5TS_rwlock_init(H5TS_rwlock_t *lock); /* R/W lock & unlock calls are defined in H5TSrwlock.h */ @@ -388,6 +415,15 @@ static inline unsigned H5TS_atomic_fetch_add_uint(H5TS_atomic_uint_t *obj, unsig static inline unsigned H5TS_atomic_fetch_sub_uint(H5TS_atomic_uint_t *obj, unsigned arg); H5_DLL void H5TS_atomic_destroy_uint(H5TS_atomic_uint_t *obj); +/* atomic_size_t */ +H5_DLL void H5TS_atomic_init_size_t(H5TS_atomic_size_t *obj, size_t desired); +/* Atomic 'size_t' load, store, etc. calls are defined in H5TSatomic.h */ +static inline size_t H5TS_atomic_load_size_t(H5TS_atomic_size_t *obj); +static inline void H5TS_atomic_store_size_t(H5TS_atomic_size_t *obj, size_t desired); +static inline size_t H5TS_atomic_fetch_add_size_t(H5TS_atomic_size_t *obj, size_t arg); +static inline size_t H5TS_atomic_fetch_sub_size_t(H5TS_atomic_size_t *obj, size_t arg); +H5_DLL void H5TS_atomic_destroy_size_t(H5TS_atomic_size_t *obj); + /* void * _Atomic (atomic void pointer) */ H5_DLL void H5TS_atomic_init_voidp(H5TS_atomic_voidp_t *obj, void *desired); /* Atomic 'void *' load, store, etc. 
calls are defined in H5TSatomic.h */ @@ -417,6 +453,7 @@ H5_DLL herr_t H5TS_semaphore_destroy(H5TS_semaphore_t *sem); #ifndef __cplusplus #include "H5TScond.h" #include "H5TSmutex.h" +#include "H5TSdlftt_mutex.h" #include "H5TSkey.h" #ifndef H5_HAVE_STDATOMIC_H #include "H5TSatomic.h" @@ -427,6 +464,34 @@ H5_DLL herr_t H5TS_semaphore_destroy(H5TS_semaphore_t *sem); #include "H5TSpool.h" #endif /* __cplusplus */ +#else /* H5_HAVE_THREADS */ + +/* Aliases for atomic types used when single-threaded */ +typedef size_t H5TS_atomic_size_t; +#define H5TS_atomic_init_size_t(obj, desired) *(obj) = (desired) +#define H5TS_atomic_load_size_t(obj) *(obj) +#define H5TS_atomic_store_size_t(obj, desired) *(obj) = (desired) +#define H5TS_atomic_fetch_add_size_t(obj, arg) *(obj) += (arg) +#define H5TS_atomic_fetch_sub_size_t(obj, arg) *(obj) -= (arg) +#define H5TS_atomic_destroy_size_t(obj) /* */ + #endif /* H5_HAVE_THREADS */ +/* Wrappers for atomics that are used for concurrent multithreaded support */ +#ifdef H5_HAVE_CONCURRENCY +#define H5TS_ATOMIC_INIT_SIZE_T(obj, desired) H5TS_atomic_init_size_t(obj, desired) +#define H5TS_ATOMIC_LOAD_SIZE_T(obj) H5TS_atomic_load_size_t(obj) +#define H5TS_ATOMIC_STORE_SIZE_T(obj, desired) H5TS_atomic_store_size_t(obj, desired) +#define H5TS_ATOMIC_FETCH_ADD_SIZE_T(obj, arg) H5TS_atomic_fetch_add_size_t(obj, arg) +#define H5TS_ATOMIC_FETCH_SUB_SIZE_T(obj, arg) H5TS_atomic_fetch_sub_size_t(obj, arg) +#define H5TS_ATOMIC_DESTROY_SIZE_T(obj) H5TS_atomic_destroy_size_t(obj) +#else /* H5_HAVE_CONCURRENCY */ +#define H5TS_ATOMIC_INIT_SIZE_T(obj, desired) *(obj) = (desired) +#define H5TS_ATOMIC_LOAD_SIZE_T(obj) *(obj) +#define H5TS_ATOMIC_STORE_SIZE_T(obj, desired) *(obj) = (desired) +#define H5TS_ATOMIC_FETCH_ADD_SIZE_T(obj, arg) *(obj) += (arg) +#define H5TS_ATOMIC_FETCH_SUB_SIZE_T(obj, arg) *(obj) -= (arg) +#define H5TS_ATOMIC_DESTROY_SIZE_T(obj) /* */ +#endif /* H5_HAVE_CONCURRENCY */ + #endif /* H5TSprivate_H_ */ diff --git a/src/H5private.h b/src/H5private.h index 769e6c93d1b..8bf7fc3ce28 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -1742,6 +1742,58 @@ H5_PKG_DECLARE_FUNC(H5_MY_PKG_INIT, H5_MY_PKG) #define HDcompile_assert(e) do { typedef struct { unsigned int b: (e); } x; } while(0) */ +/* Safely call an initialization routine for a global variable. + * + * Note that this currently assumes that the global variable is a struct + * containing a field of type H5_global_t as its first field. + */ +typedef struct H5_global_t { + bool init; /* Whether the global has been initialized */ +} H5_global_t; + +#ifdef H5_HAVE_CONCURRENCY + +/* Mechanism for implementing double-checked locking protocol (DCLP) for global + * variables with deferred initialization (i.e. not at library init time). + * + * This is invoked from a single thread while blocking other threads from + * using the global until initialization is completed. + * + * FYI: https://preshing.com/20130930/double-checked-locking-is-fixed-in-cpp11/ + */ +#define H5_GLOBAL_INIT(v, f, maj, min, err_ret, ...) 
\ + do { \ + if (H5_UNLIKELY(!((H5_global_t *)(v))->init)) { \ + if (H5_UNLIKELY(H5TS_dlftt_mutex_acquire(&H5TS_bootstrap_mtx_g) < 0)) \ + HGOTO_ERROR((maj), H5E_CANTLOCK, (err_ret), "can't acquire global bootstrap mutex"); \ + if (!((H5_global_t *)(v))->init) { \ + /* Invoke the init function */ \ + if (H5_UNLIKELY((f)(v) < 0)) \ + HGOTO_ERROR((maj), (min), (err_ret), __VA_ARGS__); \ + \ + /* Indicate that the free list is initialized */ \ + H5_GLOBAL_SET_INIT(v, true); \ + } \ + if (H5_UNLIKELY(H5TS_dlftt_mutex_release(&H5TS_bootstrap_mtx_g) < 0)) \ + HGOTO_ERROR((maj), H5E_CANTUNLOCK, (err_ret), "can't release global bootstrap mutex"); \ + } \ + } while (0) +#else /* H5_HAVE_CONCURRENCY */ +#define H5_GLOBAL_INIT(v, f, maj, min, err_ret, ...) \ + do { \ + if (H5_UNLIKELY(!((H5_global_t *)(v))->init)) { \ + /* Invoke the init function */ \ + if (H5_UNLIKELY((f)(v) < 0)) \ + HGOTO_ERROR((maj), (min), (err_ret), __VA_ARGS__); \ + \ + /* Indicate that the free list is initialized */ \ + H5_GLOBAL_SET_INIT(v, true); \ + } \ + } while (0) +#endif /* H5_HAVE_CONCURRENCY */ +#define H5_GLOBAL_IS_INIT(v) (((H5_global_t *)(v))->init) +#define H5_GLOBAL_SET_INIT(v, x) ((H5_global_t *)(v))->init = (x) + /* File-independent encode/decode routines */ #include "H5encode.h" diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index b8e44fd00ef..87dbfe52a9d 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -362,6 +362,7 @@ set (ttsafe_SOURCES ${HDF5_TEST_SOURCE_DIR}/ttsafe_thread_id.c ${HDF5_TEST_SOURCE_DIR}/ttsafe_thread_pool.c ${HDF5_TEST_SOURCE_DIR}/ttsafe_error_stacks.c + ${HDF5_TEST_SOURCE_DIR}/ttsafe_h5fl.c ) set (H5_EXPRESS_TESTS diff --git a/test/dsets.c b/test/dsets.c index b9e1d342a7d..f9155e244b2 100644 --- a/test/dsets.c +++ b/test/dsets.c @@ -15931,23 +15931,26 @@ test_dcpl_layout_caching(H5D_layout_t layout_type) hid_t src_space_id = H5I_INVALID_HID; hid_t src_files[DCPL_LAYOUT_NUM_SRC_DSETS]; hid_t src_dsets[DCPL_LAYOUT_NUM_SRC_DSETS]; + char src_fname[DCPL_LAYOUT_NUM_SRC_DSETS][FILENAME_BUF_SIZE]; const char *layout_msg = NULL; char test_str[FILENAME_BUF_SIZE]; switch (layout_type) { - case (H5D_COMPACT): + case H5D_COMPACT: layout_msg = "compact layout"; break; - case (H5D_CONTIGUOUS): + case H5D_CONTIGUOUS: layout_msg = "contiguous layout"; break; - case (H5D_CHUNKED): + case H5D_CHUNKED: layout_msg = "chunked layout"; break; - case (H5D_VIRTUAL): + case H5D_VIRTUAL: layout_msg = "virtual layout"; break; + case H5D_LAYOUT_ERROR: + case H5D_NLAYOUTS: default: TEST_ERROR; break; @@ -16001,10 +16004,9 @@ test_dcpl_layout_caching(H5D_layout_t layout_type) /* Create source files and datasets */ for (int i = 0; i < DCPL_LAYOUT_NUM_SRC_DSETS; i++) { - char src_fname[FILENAME_BUF_SIZE]; char src_dname[FILENAME_BUF_SIZE]; - if (snprintf(src_fname, FILENAME_BUF_SIZE, "%d%s%s.h5", i, DCPL_LAYOUT_FILENAME, "_src") >= + if (snprintf(src_fname[i], FILENAME_BUF_SIZE, "%s%s%d.h5", DCPL_LAYOUT_FILENAME, "_src", i) >= FILENAME_BUF_SIZE) TEST_ERROR; @@ -16012,7 +16014,7 @@ test_dcpl_layout_caching(H5D_layout_t layout_type) FILENAME_BUF_SIZE) TEST_ERROR; - if ((src_files[i] = H5Fcreate(src_fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) + if ((src_files[i] = H5Fcreate(src_fname[i], H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; if ((src_dsets[i] = H5Dcreate2(src_files[i], src_dname, type_id, src_space_id, H5P_DEFAULT, @@ -16031,7 +16033,7 @@ test_dcpl_layout_caching(H5D_layout_t layout_type) TEST_ERROR; /* Map destination selection to src selection */ - if 
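/*
 * Illustrative sketch (not part of this patch): H5_GLOBAL_INIT() above checks
 * the 'init' flag, takes the bootstrap DLFTT mutex, re-checks the flag, runs
 * the caller-supplied init function, and then marks the global as initialized.
 * A hypothetical global wired up for the macro might look like the following;
 * my_global_t, my_global_g and my_global_init() are illustrative names only,
 * and the sketch assumes the library-private headers (H5private.h).
 */
typedef struct {
    H5_global_t hdr;   /* Must be the first field, per the macro's contract */
    size_t      count; /* Deferred-initialized payload */
} my_global_t;

static my_global_t my_global_g; /* Zero-initialized, so hdr.init starts out false */

static herr_t
my_global_init(void *_gl)
{
    my_global_t *gl = (my_global_t *)_gl;

    gl->count = 0;

    return SUCCEED;
}

/* Inside a routine that uses the usual HGOTO_ERROR / done: idiom:
 *
 *     H5_GLOBAL_INIT(&my_global_g, my_global_init, H5E_LIB, H5E_CANTINIT, FAIL,
 *                    "can't initialize my_global_g");
 */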
((H5Pset_virtual(dcpl_id, space_id, src_fname, src_dname, src_space_id)) < 0) + if ((H5Pset_virtual(dcpl_id, space_id, src_fname[i], src_dname, src_space_id)) < 0) TEST_ERROR; } @@ -16044,6 +16046,8 @@ test_dcpl_layout_caching(H5D_layout_t layout_type) TEST_ERROR; break; + case H5D_LAYOUT_ERROR: + case H5D_NLAYOUTS: default: TEST_ERROR; break; @@ -16128,6 +16132,8 @@ test_dcpl_layout_caching(H5D_layout_t layout_type) if (H5Dclose(src_dsets[i]) < 0) TEST_ERROR; + + HDremove(src_fname[i]); } } PASSED(); @@ -16167,15 +16173,17 @@ test_dcpl_layout_caching(H5D_layout_t layout_type) static int test_vds_shared_strings(hid_t fapl) { - char filename[FILENAME_BUF_SIZE]; - hid_t file_id = H5I_INVALID_HID; /* File */ - hid_t dcpl_id = H5I_INVALID_HID; /* Dataset creation property list */ - hid_t src_space_id = H5I_INVALID_HID; /* Source dataspace */ - hid_t virt_space_id = H5I_INVALID_HID; /* Virtual dataspace */ - hid_t dset_id = H5I_INVALID_HID; /* Virtual dataset */ - hsize_t dims[1] = {10}; /* Dataset dimensions */ - H5O_storage_virtual_t *virt_layout = NULL; /* Virtual storage layout */ - H5D_t *dset_int = NULL; /* Internal dataset structure */ + char filename[FILENAME_BUF_SIZE] = {0}; + hid_t file_id = H5I_INVALID_HID; /* File */ + hid_t dcpl_id = H5I_INVALID_HID; /* Dataset creation property list */ + hid_t src_space_id = H5I_INVALID_HID; /* Source dataspace */ + hid_t virt_space_id = H5I_INVALID_HID; /* Virtual dataspace */ + hid_t dset_id = H5I_INVALID_HID; /* Virtual dataset */ + hsize_t dims[1] = {10}; /* Dataset dimensions */ + H5O_storage_virtual_t *virt_layout = NULL; /* Virtual storage layout */ + H5D_t *dset_int = NULL; /* Internal dataset structure */ + char file_name[64]; + char dset_name[64]; TESTING("VDS sharing of file/dataset names"); @@ -16891,23 +16899,16 @@ test_vds_shared_strings(hid_t fapl) * - Every 5th mapping uses "/shared_dataset" * - Others use unique file/dataset names */ - char file_name[64]; - char dset_name[64]; - for (int i = 0; i < NUM_MAPPINGS_MANY; i++) { - if (i % 10 == 0) { + if (i % 10 == 0) strcpy(file_name, "shared_file.h5"); - } - else { + else snprintf(file_name, sizeof(file_name), "file_%d.h5", i); - } - if (i % 5 == 0) { + if (i % 5 == 0) strcpy(dset_name, "/shared_dataset"); - } - else { + else snprintf(dset_name, sizeof(dset_name), "/dataset_%d", i); - } if (H5Pset_virtual(dcpl_id, virt_space_id, file_name, dset_name, src_space_id) < 0) TEST_ERROR; @@ -17406,7 +17407,8 @@ main(void) nerrors += (test_dcpl_layout_caching(H5D_COMPACT) < 0 ? 1 : 0); nerrors += (test_dcpl_layout_caching(H5D_CONTIGUOUS) < 0 ? 1 : 0); nerrors += (test_dcpl_layout_caching(H5D_CHUNKED) < 0 ? 1 : 0); - nerrors += (test_dcpl_layout_caching(H5D_VIRTUAL) < 0 ? 1 : 0); + if (driver_is_default_compatible) + nerrors += (test_dcpl_layout_caching(H5D_VIRTUAL) < 0 ? 1 : 0); /* Verify that source file/dataset names are shared properly */ nerrors += (test_vds_shared_strings(fapl) < 0 ? 1 : 0); @@ -17414,6 +17416,7 @@ main(void) if (nerrors) goto error; printf("All dataset tests passed.\n"); + HDremove(DCPL_LAYOUT_FILENAME); #ifdef H5_HAVE_FILTER_SZIP HDremove(NOENCODER_COPY_FILENAME); #endif /* H5_HAVE_FILTER_SZIP */ diff --git a/test/h5test.c b/test/h5test.c index cbc4dfdebdd..3ddbc583c67 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -683,7 +683,7 @@ h5_get_vfd_fapl(hid_t fapl) goto error; snprintf(sv[mt], multi_memname_maxlen, "%%s-%c.h5", multi_letters[mt]); memb_name[mt] = sv[mt]; - memb_addr[mt] = (haddr_t)MAX(mt - 1, 0) * (HADDR_MAX / 10); + memb_addr[mt] = (haddr_t)(mt ? 
(mt - 1) : 0) * (HADDR_MAX / (H5FD_MEM_NTYPES - 1)); } if (H5Pset_fapl_multi(fapl, memb_map, memb_fapl, memb_name, memb_addr, false) < 0) @@ -2453,3 +2453,41 @@ h5_load_aws_profile(const char *profile_name, bool *profile_found, char *key_id_ } #endif + +/***************************************************************************** + * + * Function h5_setup_local_rand() + * + * Purpose: Either use gettimeofday() to obtain a seed or a predefined seed + * for h5_local_rand(), print the seed to stdout, and then pass it + * to h5_local_srand(). + * + * Return: void. + * + *****************************************************************************/ +void +h5_setup_local_rand(const char *test_name, unsigned predefined_seed) +{ + unsigned seed; + + if (0 != predefined_seed) + seed = predefined_seed; + else { + struct timeval tv; + + if (HDgettimeofday(&tv, NULL) != 0) { + fprintf(stdout, "\n%s: gettimeofday() failed -- srand() not called.\n\n", test_name); + fflush(stdout); + + return; + } + + seed = (unsigned)tv.tv_usec; + } + + fprintf(stdout, "%s: seed = %d\n", test_name, seed); + fflush(stdout); + + h5_local_srand(seed); + +} /* h5_setup_local_rand() */ diff --git a/test/h5test.h b/test/h5test.h index 8096081a302..e9012f99fd3 100644 --- a/test/h5test.h +++ b/test/h5test.h @@ -1213,6 +1213,23 @@ H5TEST_DLL int h5_local_rand(void); */ H5TEST_DLL void h5_local_srand(unsigned int seed); +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Set up h5_local_rand() with a unique or predefined seed. + * + * \return none + * + * \details Either use gettimeofday() to obtain a seed or a predefined seed + * for h5_local_rand(), print the seed to stdout, and then pass it + * to h5_local_srand(). + * + * \see h5_local_srand() + * + */ +H5TEST_DLL void h5_setup_local_rand(const char *test_name, unsigned predefined_seed); + #ifdef H5_HAVE_FILTER_SZIP /** * -------------------------------------------------------------------------- diff --git a/test/tmisc.c b/test/tmisc.c index 2d0608301c7..e6a8de4206a 100644 --- a/test/tmisc.c +++ b/test/tmisc.c @@ -326,6 +326,8 @@ typedef struct { #define MISC33_FILE "bad_offset.h5" /* Definitions for misc. 
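/*
 * Illustrative sketch (not part of this patch): typical use of the new
 * h5_setup_local_rand() helper in a test, assuming h5test.h.  Passing 0 asks
 * for a gettimeofday()-based seed (which the helper prints), while passing a
 * nonzero value replays the random sequence from a previously reported seed.
 * demo_seed_usage() is an illustrative name only.
 */
#include "h5test.h"

static void
demo_seed_usage(void)
{
    int r;

    h5_setup_local_rand("demo_test", 0); /* time-based seed, printed to stdout */
    /* h5_setup_local_rand("demo_test", 12345);    or: replay a reported seed  */

    r = h5_local_rand(); /* deterministic sequence for a given seed */
    (void)r;
}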
test #35 */ +#define MISC35_FILE "tmisc35.h5" +#define MISC35_GROUPNAME "group" #define MISC35_SPACE_RANK 3 #define MISC35_SPACE_DIM1 3 #define MISC35_SPACE_DIM2 15 @@ -6076,15 +6078,22 @@ test_misc34(void) static void test_misc35(void) { + hid_t file, group; hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ hsize_t dims[] = {MISC35_SPACE_DIM1, MISC35_SPACE_DIM2, MISC35_SPACE_DIM3}; /* Dataspace dims */ hsize_t coord[MISC35_NPOINTS][MISC35_SPACE_RANK] = /* Coordinates for point selection */ {{0, 10, 5}, {1, 2, 7}, {2, 4, 9}, {0, 6, 11}, {1, 8, 13}, {2, 12, 0}, {0, 14, 2}, {1, 0, 4}, {2, 1, 6}, {0, 3, 8}}; +#if !defined H5_NO_FREE_LISTS && !defined H5_USING_MEMCHECKER size_t reg_size_start; /* Initial amount of regular memory allocated */ size_t arr_size_start; /* Initial amount of array memory allocated */ size_t blk_size_start; /* Initial amount of block memory allocated */ size_t fac_size_start; /* Initial amount of factory memory allocated */ +#endif + size_t reg_size_mid; /* Mid-point amount of regular memory allocated */ + size_t arr_size_mid; /* Mid-point amount of array memory allocated */ + size_t blk_size_mid; /* Mid-point amount of block memory allocated */ + size_t fac_size_mid; /* Mid-point amount of factory memory allocated */ size_t reg_size_final; /* Final amount of regular memory allocated */ size_t arr_size_final; /* Final amount of array memory allocated */ size_t blk_size_final; /* Final amount of block memory allocated */ @@ -6094,8 +6103,19 @@ test_misc35(void) /* Output message about test being performed */ MESSAGE(5, ("Free-list API calls")); +#if !defined H5_NO_FREE_LISTS && !defined H5_USING_MEMCHECKER + /* Garbage collect the free lists, so there's nothing on the free lists */ + /* (There may still be allocated memory of various free-list types) */ + ret = H5garbage_collect(); + CHECK(ret, FAIL, "H5garbage_collect"); + + /* Retrieve free list values */ + ret = H5get_free_list_sizes(®_size_start, &arr_size_start, &blk_size_start, &fac_size_start); + CHECK(ret, FAIL, "H5get_free_list_sizes"); +#endif + /* Create dataspace */ - /* (Allocates array free-list nodes) */ + /* (Allocates regular, array, and factory free-list nodes) */ sid = H5Screate_simple(MISC35_SPACE_RANK, dims, NULL); CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); @@ -6107,22 +6127,41 @@ test_misc35(void) ret = H5Sclose(sid); CHECK(ret, FAIL, "H5Sclose"); + /* Create a file */ + file = H5Fcreate(MISC35_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create a group */ + /* (Allocates block free-list nodes) */ + group = H5Gcreate2(file, MISC35_GROUPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, FAIL, "H5Gcreate2"); + + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + /* Retrieve initial free list values */ - ret = H5get_free_list_sizes(®_size_start, &arr_size_start, &blk_size_start, &fac_size_start); + ret = H5get_free_list_sizes(®_size_mid, &arr_size_mid, &blk_size_mid, &fac_size_mid); CHECK(ret, FAIL, "H5get_free_list_sizes"); #if !defined H5_NO_FREE_LISTS && !defined H5_USING_MEMCHECKER - /* All the free list values should be >0 */ - CHECK(reg_size_start, 0, "H5get_free_list_sizes"); - CHECK(arr_size_start, 0, "H5get_free_list_sizes"); - CHECK(blk_size_start, 0, "H5get_free_list_sizes"); - CHECK(fac_size_start, 0, "H5get_free_list_sizes"); + /* All the mid-point free list values should be >= previous values */ + if (reg_size_mid < reg_size_start) + ERROR("reg_size_mid < 
reg_size_start"); + if (arr_size_mid < arr_size_start) + ERROR("arr_size_mid < arr_size_start"); + if (blk_size_mid < blk_size_start) + ERROR("blk_size_mid < blk_size_start"); + if (fac_size_mid < fac_size_start) + ERROR("fac_size_mid < fac_size_start"); #else /* All the values should be == 0 */ - VERIFY(reg_size_start, 0, "H5get_free_list_sizes"); - VERIFY(arr_size_start, 0, "H5get_free_list_sizes"); - VERIFY(blk_size_start, 0, "H5get_free_list_sizes"); - VERIFY(fac_size_start, 0, "H5get_free_list_sizes"); + VERIFY(reg_size_mid, 0, "H5get_free_list_sizes - regular"); + VERIFY(arr_size_mid, 0, "H5get_free_list_sizes - array"); + VERIFY(blk_size_mid, 0, "H5get_free_list_sizes - block"); + VERIFY(fac_size_mid, 0, "H5get_free_list_sizes - factory"); #endif /* Garbage collect the free lists */ @@ -6134,15 +6173,14 @@ test_misc35(void) CHECK(ret, FAIL, "H5get_free_list_sizes"); /* All the free list values should be <= previous values */ - if (reg_size_final > reg_size_start) - ERROR("reg_size_final > reg_size_start"); - if (arr_size_final > arr_size_start) - ERROR("arr_size_final > arr_size_start"); - if (blk_size_final > blk_size_start) - ERROR("blk_size_final > blk_size_start"); - if (fac_size_final > fac_size_start) - ERROR("fac_size_final > fac_size_start"); - + if (reg_size_final > reg_size_mid) + ERROR("reg_size_final > reg_size_mid"); + if (arr_size_final > arr_size_mid) + ERROR("arr_size_final > arr_size_mid"); + if (blk_size_final > blk_size_mid) + ERROR("blk_size_final > blk_size_mid"); + if (fac_size_final > fac_size_mid) + ERROR("fac_size_final > fac_size_mid"); } /* end test_misc35() */ /* Context to pass to 'atclose' callbacks */ diff --git a/test/ttsafe.c b/test/ttsafe.c index fde42220eb5..32abb5254ac 100644 --- a/test/ttsafe.c +++ b/test/ttsafe.c @@ -168,6 +168,14 @@ main(int argc, char *argv[]) #endif /* H5_HAVE_THREADSAFE_API */ +#ifdef H5_HAVE_CONCURRENCY + /* Test library packages' threadsafety */ + AddTest("h5fl", tts_h5fl, NULL, NULL, NULL, 0, "Multithreaded H5FL package"); +#else /* H5_HAVE_CONCURRENCY */ + /* Test library packages' threadsafety */ + AddTest("-h5fl", tts_h5fl, NULL, NULL, NULL, 0, "Multithreaded H5FL package"); +#endif /* H5_HAVE_CONCURRENCY */ + #else /* H5_HAVE_THREADS */ printf("Most threading tests skipped because THREADS not enabled\n"); diff --git a/test/ttsafe.h b/test/ttsafe.h index 6739f89bfcf..a467348f486 100644 --- a/test/ttsafe.h +++ b/test/ttsafe.h @@ -67,5 +67,8 @@ void cleanup_acreate(void *); void cleanup_attr_vlen(void *); #endif /* H5_HAVE_THREADSAFE_API */ + +/* Threadsafe package testing routines */ +void tts_h5fl(void *); #endif /* H5_HAVE_THREADS */ #endif /* TTSAFE_H */ diff --git a/test/ttsafe_h5fl.c b/test/ttsafe_h5fl.c new file mode 100644 index 00000000000..17642c1157d --- /dev/null +++ b/test/ttsafe_h5fl.c @@ -0,0 +1,2534 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the LICENSE file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
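/*
 * Illustrative sketch (not part of this patch): the reworked test_misc35()
 * above leans on two public calls, H5garbage_collect() and
 * H5get_free_list_sizes().  A minimal standalone program using the same
 * "garbage-collect, snapshot, do work, snapshot again" pattern:
 */
#include "hdf5.h"
#include <stdio.h>

int
main(void)
{
    size_t  reg0, arr0, blk0, fac0;
    size_t  reg1, arr1, blk1, fac1;
    hsize_t dims[1] = {100};
    hid_t   sid;

    if (H5garbage_collect() < 0) /* empty the free lists first */
        return 1;
    if (H5get_free_list_sizes(&reg0, &arr0, &blk0, &fac0) < 0) /* baseline snapshot */
        return 1;

    if ((sid = H5Screate_simple(1, dims, NULL)) < 0) /* allocates free-list nodes */
        return 1;
    if (H5Sclose(sid) < 0) /* returns the nodes to the free lists */
        return 1;

    if (H5get_free_list_sizes(&reg1, &arr1, &blk1, &fac1) < 0)
        return 1;

    /* With free lists enabled, the second snapshot should not be smaller */
    printf("regular: %zu -> %zu, array: %zu -> %zu, block: %zu -> %zu, factory: %zu -> %zu\n",
           reg0, reg1, arr0, arr1, blk0, blk1, fac0, fac1);

    return 0;
}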
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/******************************************************************** + * + * Test the threadsafe correctness of the H5FL routines + * + ********************************************************************/ + +#include "ttsafe.h" + +#ifdef H5_HAVE_THREADS + +/* Library headers needed */ +#include "H5FLprivate.h" /* Free Lists */ + +/* Macros */ +#define NUM_THREADS 16 + +/* Test config */ +#define MAX_TOKENS 1024 /* Max # of tokens (buffers) active */ +#define NUM_TEST_OPS 4096 /* Number of operations in a test vector */ +#define NUM_VECTORS 16 /* Number of vectors for each thread */ +#define NUM_ITERS_PER_THREAD 64 /* Number of times to run vectors in each thread */ + +/* Types of various sizes, for regular free lists */ +typedef struct { + unsigned char buf[16]; +} h5fl_reg_test_type_1; + +typedef struct { + unsigned char buf[64]; +} h5fl_reg_test_type_2; + +typedef struct { + unsigned char buf[256]; +} h5fl_reg_test_type_3; + +typedef struct { + unsigned char buf[1]; +} h5fl_reg_test_type_4; + +typedef struct { + unsigned char buf[2]; +} h5fl_reg_test_type_5; + +typedef struct { + unsigned char buf[3]; +} h5fl_reg_test_type_6; + +typedef struct { + unsigned char buf[5]; +} h5fl_reg_test_type_7; + +typedef struct { + unsigned char buf[8]; +} h5fl_reg_test_type_8; + +typedef struct { + unsigned char buf[13]; +} h5fl_reg_test_type_9; + +typedef struct { + unsigned char buf[21]; +} h5fl_reg_test_type_10; + +typedef struct { + unsigned char buf[34]; +} h5fl_reg_test_type_11; + +typedef struct { + unsigned char buf[55]; +} h5fl_reg_test_type_12; + +/* Types of various sizes, for array free lists */ +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_1; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_2; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_3; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_4; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_5; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_6; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_7; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_8; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_9; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_10; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_11; + +typedef struct { + unsigned char buf[1]; +} h5fl_arr_test_type_12; + +/* 'regular' free lists of the various types */ +H5FL_DEFINE_STATIC(h5fl_reg_test_type_1); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_2); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_3); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_4); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_5); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_6); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_7); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_8); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_9); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_10); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_11); +H5FL_DEFINE_STATIC(h5fl_reg_test_type_12); + +/* 'block' free lists of the various types */ +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_1); +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_2); +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_3); +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_4); +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_5); +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_6); +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_7); +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_8); 
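/*
 * Illustrative sketch (not part of this patch): the 'regular' free lists
 * declared above are exercised through the H5FL_MALLOC / H5FL_CALLOC /
 * H5FL_FREE macros from H5FLprivate.h, roughly as follows.  The type name
 * foo_t is illustrative, and the sketch assumes the library-private headers.
 */
typedef struct {
    int a;
    int b;
} foo_t;

H5FL_DEFINE_STATIC(foo_t); /* Backing free list for foo_t allocations */

static void
demo_h5fl_reg_usage(void)
{
    foo_t *p;

    if (NULL != (p = H5FL_CALLOC(foo_t))) { /* Zero-filled node, reused from the free list when possible */
        p->a = 1;
        p = H5FL_FREE(foo_t, p); /* Returns the node to the free list and yields NULL */
    }
}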
+H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_9); +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_10); +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_11); +H5FL_BLK_DEFINE_STATIC(h5fl_blk_test_type_12); + +/* 'array' free lists of the various types */ +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_1, 16); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_2, 64); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_3, 256); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_4, 2); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_5, 3); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_6, 5); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_7, 8); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_8, 13); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_9, 21); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_10, 34); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_11, 55); +H5FL_ARR_DEFINE_STATIC(h5fl_arr_test_type_12, 89); + +typedef struct { + H5FL_reg_head_t *free_list; + size_t elmt_size; + unsigned char *fill1; + unsigned char *fill2; + unsigned char *fill3; + void *zero; +} h5fl_reg_type_info; + +typedef struct { + H5FL_fac_head_t *free_list; + size_t elmt_size; + unsigned char *fill1; + unsigned char *fill2; + unsigned char *fill3; + void *zero; +} h5fl_fac_type_info; + +typedef struct { + H5FL_blk_head_t *free_list; + size_t initial_size; +} h5fl_blk_type_info; + +typedef struct { + H5FL_arr_head_t *free_list; + unsigned max_size; +} h5fl_arr_type_info; + +/* Array of all the 'regular' free lists & info */ +static h5fl_reg_type_info h5fl_reg_test_types[] = { + {&H5FL_REG_NAME(h5fl_reg_test_type_1), sizeof(h5fl_reg_test_type_1), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_2), sizeof(h5fl_reg_test_type_2), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_3), sizeof(h5fl_reg_test_type_3), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_4), sizeof(h5fl_reg_test_type_4), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_5), sizeof(h5fl_reg_test_type_5), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_6), sizeof(h5fl_reg_test_type_6), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_7), sizeof(h5fl_reg_test_type_7), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_8), sizeof(h5fl_reg_test_type_8), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_9), sizeof(h5fl_reg_test_type_9), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_10), sizeof(h5fl_reg_test_type_10), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_11), sizeof(h5fl_reg_test_type_11), NULL, NULL, NULL, NULL}, + {&H5FL_REG_NAME(h5fl_reg_test_type_12), sizeof(h5fl_reg_test_type_12), NULL, NULL, NULL, NULL}, +}; + +/* Array of all the 'factory' free lists & info */ +static h5fl_fac_type_info h5fl_fac_test_types[] = { + {NULL, 16, NULL, NULL, NULL, NULL}, {NULL, 64, NULL, NULL, NULL, NULL}, + {NULL, 256, NULL, NULL, NULL, NULL}, {NULL, 1, NULL, NULL, NULL, NULL}, + {NULL, 2, NULL, NULL, NULL, NULL}, {NULL, 3, NULL, NULL, NULL, NULL}, + {NULL, 5, NULL, NULL, NULL, NULL}, {NULL, 8, NULL, NULL, NULL, NULL}, + {NULL, 13, NULL, NULL, NULL, NULL}, {NULL, 21, NULL, NULL, NULL, NULL}, + {NULL, 34, NULL, NULL, NULL, NULL}, {NULL, 55, NULL, NULL, NULL, NULL}, +}; + +/* Array of all the 'block' free lists & info */ +static h5fl_blk_type_info h5fl_blk_test_types[] = { + {&H5FL_BLK_NAME(h5fl_blk_test_type_1), 16}, {&H5FL_BLK_NAME(h5fl_blk_test_type_2), 64}, + {&H5FL_BLK_NAME(h5fl_blk_test_type_3), 256}, {&H5FL_BLK_NAME(h5fl_blk_test_type_4), 1}, + {&H5FL_BLK_NAME(h5fl_blk_test_type_5), 2}, 
{&H5FL_BLK_NAME(h5fl_blk_test_type_6), 3}, + {&H5FL_BLK_NAME(h5fl_blk_test_type_7), 5}, {&H5FL_BLK_NAME(h5fl_blk_test_type_8), 8}, + {&H5FL_BLK_NAME(h5fl_blk_test_type_9), 13}, {&H5FL_BLK_NAME(h5fl_blk_test_type_10), 21}, + {&H5FL_BLK_NAME(h5fl_blk_test_type_11), 34}, {&H5FL_BLK_NAME(h5fl_blk_test_type_12), 55}, +}; + +/* Array of all the 'array' free lists & info */ +static h5fl_arr_type_info h5fl_arr_test_types[] = { + {&H5FL_ARR_NAME(h5fl_arr_test_type_1), 16}, {&H5FL_ARR_NAME(h5fl_arr_test_type_2), 64}, + {&H5FL_ARR_NAME(h5fl_arr_test_type_3), 256}, {&H5FL_ARR_NAME(h5fl_arr_test_type_4), 2}, + {&H5FL_ARR_NAME(h5fl_arr_test_type_5), 3}, {&H5FL_ARR_NAME(h5fl_arr_test_type_6), 5}, + {&H5FL_ARR_NAME(h5fl_arr_test_type_7), 8}, {&H5FL_ARR_NAME(h5fl_arr_test_type_8), 13}, + {&H5FL_ARR_NAME(h5fl_arr_test_type_9), 21}, {&H5FL_ARR_NAME(h5fl_arr_test_type_10), 34}, + {&H5FL_ARR_NAME(h5fl_arr_test_type_11), 55}, {&H5FL_ARR_NAME(h5fl_arr_test_type_12), 89}, +}; + +typedef enum { + H5FL_REG_OP_MALLOC, + H5FL_REG_OP_CALLOC, + H5FL_REG_OP_ZERO, + H5FL_REG_OP_FILL1, + H5FL_REG_OP_FILL2, + H5FL_REG_OP_FILL3, + H5FL_REG_OP_FREE, +} h5fl_reg_test_op_code; + +typedef enum { + H5FL_FAC_OP_MALLOC, + H5FL_FAC_OP_CALLOC, + H5FL_FAC_OP_ZERO, + H5FL_FAC_OP_FILL1, + H5FL_FAC_OP_FILL2, + H5FL_FAC_OP_FILL3, + H5FL_FAC_OP_FREE, +} h5fl_fac_test_op_code; + +typedef enum { + H5FL_BLK_OP_MALLOC, + H5FL_BLK_OP_CALLOC, + H5FL_BLK_OP_REALLOC, + H5FL_BLK_OP_ZERO, + H5FL_BLK_OP_FILL1, + H5FL_BLK_OP_FILL2, + H5FL_BLK_OP_FILL3, + H5FL_BLK_OP_FREE, +} h5fl_blk_test_op_code; + +typedef enum { + H5FL_ARR_OP_MALLOC, + H5FL_ARR_OP_CALLOC, + H5FL_ARR_OP_REALLOC, + H5FL_ARR_OP_ZERO, + H5FL_ARR_OP_FILL1, + H5FL_ARR_OP_FILL2, + H5FL_ARR_OP_FILL3, + H5FL_ARR_OP_FREE, +} h5fl_arr_test_op_code; + +typedef enum { + H5FL_REG_ST_UNINIT, + H5FL_REG_ST_ZERO, + H5FL_REG_ST_FILL1, + H5FL_REG_ST_FILL2, + H5FL_REG_ST_FILL3 +} h5fl_reg_token_state; + +typedef enum { + H5FL_FAC_ST_UNINIT, + H5FL_FAC_ST_ZERO, + H5FL_FAC_ST_FILL1, + H5FL_FAC_ST_FILL2, + H5FL_FAC_ST_FILL3 +} h5fl_fac_token_state; + +typedef enum { + H5FL_BLK_ST_UNINIT, + H5FL_BLK_ST_ZERO, + H5FL_BLK_ST_FILL1, + H5FL_BLK_ST_FILL2, + H5FL_BLK_ST_FILL3 +} h5fl_blk_token_state; + +typedef enum { + H5FL_ARR_ST_UNINIT, + H5FL_ARR_ST_ZERO, + H5FL_ARR_ST_FILL1, + H5FL_ARR_ST_FILL2, + H5FL_ARR_ST_FILL3 +} h5fl_arr_token_state; + +typedef struct { + void *val; + unsigned type_idx; + h5fl_reg_token_state state; +} h5fl_reg_test_token; + +typedef struct { + void *val; + unsigned type_idx; + h5fl_fac_token_state state; +} h5fl_fac_test_token; + +typedef struct { + unsigned char *val; + unsigned type_idx; + size_t curr_size; + int size_shift; + h5fl_blk_token_state state; +} h5fl_blk_test_token; + +typedef struct { + unsigned char *val; + unsigned type_idx; + unsigned curr_size; + h5fl_arr_token_state state; +} h5fl_arr_test_token; + +typedef union { + unsigned type_idx; + h5fl_reg_test_token *token; +} h5fl_reg_test_op_param; + +typedef union { + unsigned type_idx; + h5fl_fac_test_token *token; +} h5fl_fac_test_op_param; + +typedef union { + unsigned type_idx; + h5fl_blk_test_token *token; + int size_shift; +} h5fl_blk_test_op_param; + +typedef union { + unsigned type_idx; + h5fl_arr_test_token *token; + unsigned rng_size; +} h5fl_arr_test_op_param; + +typedef struct { + h5fl_reg_test_op_code op_code; + h5fl_reg_test_token *token; + h5fl_reg_test_op_param param; +} h5fl_reg_test_op; + +typedef struct { + h5fl_fac_test_op_code op_code; + h5fl_fac_test_token *token; + h5fl_fac_test_op_param param; 
+} h5fl_fac_test_op; + +typedef struct { + h5fl_blk_test_op_code op_code; + h5fl_blk_test_token *token; + h5fl_blk_test_op_param param; +} h5fl_blk_test_op; + +typedef struct { + h5fl_arr_test_op_code op_code; + h5fl_arr_test_token *token; + h5fl_arr_test_op_param param; +} h5fl_arr_test_op; + +typedef struct { + unsigned vec_size; + h5fl_reg_test_op *op_vector; +} h5fl_reg_test_vector; + +typedef struct { + unsigned vec_size; + h5fl_fac_test_op *op_vector; +} h5fl_fac_test_vector; + +typedef struct { + unsigned vec_size; + h5fl_blk_test_op *op_vector; +} h5fl_blk_test_vector; + +typedef struct { + unsigned vec_size; + h5fl_arr_test_op *op_vector; +} h5fl_arr_test_vector; + +typedef struct { + unsigned odds; + h5fl_reg_test_op_code op_code; +} h5fl_reg_test_op_odds; + +typedef struct { + unsigned odds; + h5fl_fac_test_op_code op_code; +} h5fl_fac_test_op_odds; + +typedef struct { + unsigned odds; + h5fl_blk_test_op_code op_code; +} h5fl_blk_test_op_odds; + +typedef struct { + unsigned odds; + h5fl_arr_test_op_code op_code; +} h5fl_arr_test_op_odds; + +/* Operation odds when token array is not full */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_reg_test_op_odds h5fl_reg_all_ops_odds[] = { + {221, H5FL_REG_OP_MALLOC}, /* 22.1% = H5FL_REG_OP_MALLOC */ + {221, H5FL_REG_OP_CALLOC}, /* 22.1% = H5FL_REG_OP_CALLOC */ + {64, H5FL_REG_OP_ZERO}, /* 6.4% = H5FL_REG_OP_ZERO */ + {64, H5FL_REG_OP_FILL1}, /* 6.4% = H5FL_REG_OP_FILL1 */ + {64, H5FL_REG_OP_FILL2}, /* 6.4% = H5FL_REG_OP_FILL2 */ + {64, H5FL_REG_OP_FILL3}, /* 6.4% = H5FL_REG_OP_FILL3 */ + {302, H5FL_REG_OP_FREE}, /* 30.2% = H5FL_REG_OP_FREE */ +}; + +/* Operation odds when token array is not full */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_fac_test_op_odds h5fl_fac_all_ops_odds[] = { + {221, H5FL_FAC_OP_MALLOC}, /* 22.1% = H5FL_FAC_OP_MALLOC */ + {221, H5FL_FAC_OP_CALLOC}, /* 22.1% = H5FL_FAC_OP_CALLOC */ + {64, H5FL_FAC_OP_ZERO}, /* 6.4% = H5FL_FAC_OP_ZERO */ + {64, H5FL_FAC_OP_FILL1}, /* 6.4% = H5FL_FAC_OP_FILL1 */ + {64, H5FL_FAC_OP_FILL2}, /* 6.4% = H5FL_FAC_OP_FILL2 */ + {64, H5FL_FAC_OP_FILL3}, /* 6.4% = H5FL_FAC_OP_FILL3 */ + {302, H5FL_FAC_OP_FREE}, /* 30.2% = H5FL_FAC_OP_FREE */ +}; + +/* Operation odds when token array is not full */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_blk_test_op_odds h5fl_blk_all_ops_odds[] = { + {171, H5FL_BLK_OP_MALLOC}, /* 17.1% = H5FL_BLK_OP_MALLOC */ + {171, H5FL_BLK_OP_CALLOC}, /* 17.1% = H5FL_BLK_OP_CALLOC */ + {200, H5FL_BLK_OP_REALLOC}, /* 20.0% = H5FL_BLK_OP_REALLOC */ + {64, H5FL_BLK_OP_ZERO}, /* 6.4% = H5FL_BLK_OP_ZERO */ + {64, H5FL_BLK_OP_FILL1}, /* 6.4% = H5FL_BLK_OP_FILL1 */ + {64, H5FL_BLK_OP_FILL2}, /* 6.4% = H5FL_BLK_OP_FILL2 */ + {64, H5FL_BLK_OP_FILL3}, /* 6.4% = H5FL_BLK_OP_FILL3 */ + {202, H5FL_BLK_OP_FREE}, /* 20.2% = H5FL_BLK_OP_FREE */ +}; + +/* Operation odds when token array is not full */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_arr_test_op_odds h5fl_arr_all_ops_odds[] = { + {171, H5FL_ARR_OP_MALLOC}, /* 17.1% = H5FL_ARR_OP_MALLOC */ + {171, H5FL_ARR_OP_CALLOC}, /* 17.1% = H5FL_ARR_OP_CALLOC */ + {200, H5FL_ARR_OP_REALLOC}, /* 20.0% = H5FL_ARR_OP_REALLOC */ + {64, H5FL_ARR_OP_ZERO}, /* 6.4% = H5FL_ARR_OP_ZERO */ + {64, H5FL_ARR_OP_FILL1}, /* 6.4% = H5FL_ARR_OP_FILL1 */ + {64, H5FL_ARR_OP_FILL2}, /* 6.4% = H5FL_ARR_OP_FILL2 */ + {64, H5FL_ARR_OP_FILL3}, /* 6.4% = H5FL_ARR_OP_FILL3 */ + {202, H5FL_ARR_OP_FREE}, /* 20.2% = H5FL_ARR_OP_FREE */ +}; + +/* Operation odds when token array is full */ +/* (Must sum to 1000 (i.e. 
100%) */ +static const h5fl_reg_test_op_odds h5fl_reg_full_ops_odds[] = { + {0, H5FL_REG_OP_MALLOC}, /* 0% = H5FL_REG_OP_MALLOC */ + {0, H5FL_REG_OP_CALLOC}, /* 0% = H5FL_REG_OP_CALLOC */ + {104, H5FL_REG_OP_ZERO}, /* 10.4% = H5FL_REG_OP_ZERO */ + {104, H5FL_REG_OP_FILL1}, /* 10.4% = H5FL_REG_OP_FILL1 */ + {104, H5FL_REG_OP_FILL2}, /* 10.4% = H5FL_REG_OP_FILL2 */ + {104, H5FL_REG_OP_FILL3}, /* 10.4% = H5FL_REG_OP_FILL3 */ + {584, H5FL_REG_OP_FREE}, /* 58.4% = H5FL_REG_OP_FREE */ +}; + +/* Operation odds when token array is full */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_fac_test_op_odds h5fl_fac_full_ops_odds[] = { + {0, H5FL_FAC_OP_MALLOC}, /* 0% = H5FL_FAC_OP_MALLOC */ + {0, H5FL_FAC_OP_CALLOC}, /* 0% = H5FL_FAC_OP_CALLOC */ + {104, H5FL_FAC_OP_ZERO}, /* 10.4% = H5FL_FAC_OP_ZERO */ + {104, H5FL_FAC_OP_FILL1}, /* 10.4% = H5FL_FAC_OP_FILL1 */ + {104, H5FL_FAC_OP_FILL2}, /* 10.4% = H5FL_FAC_OP_FILL2 */ + {104, H5FL_FAC_OP_FILL3}, /* 10.4% = H5FL_FAC_OP_FILL3 */ + {584, H5FL_FAC_OP_FREE}, /* 58.4% = H5FL_FAC_OP_FREE */ +}; + +/* Operation odds when token array is full */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_blk_test_op_odds h5fl_blk_full_ops_odds[] = { + {0, H5FL_BLK_OP_MALLOC}, /* 0% = H5FL_BLK_OP_MALLOC */ + {0, H5FL_BLK_OP_CALLOC}, /* 0% = H5FL_BLK_OP_CALLOC */ + {200, H5FL_BLK_OP_REALLOC}, /* 20.0% = H5FL_BLK_OP_REALLOC */ + {84, H5FL_BLK_OP_ZERO}, /* 8.4% = H5FL_BLK_OP_ZERO */ + {84, H5FL_BLK_OP_FILL1}, /* 8.4% = H5FL_BLK_OP_FILL1 */ + {84, H5FL_BLK_OP_FILL2}, /* 8.4% = H5FL_BLK_OP_FILL2 */ + {84, H5FL_BLK_OP_FILL3}, /* 8.4% = H5FL_BLK_OP_FILL3 */ + {464, H5FL_BLK_OP_FREE}, /* 46.4% = H5FL_BLK_OP_FREE */ +}; + +/* Operation odds when token array is full */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_arr_test_op_odds h5fl_arr_full_ops_odds[] = { + {0, H5FL_ARR_OP_MALLOC}, /* 0% = H5FL_ARR_OP_MALLOC */ + {0, H5FL_ARR_OP_CALLOC}, /* 0% = H5FL_ARR_OP_CALLOC */ + {200, H5FL_ARR_OP_REALLOC}, /* 20.0% = H5FL_ARR_OP_REALLOC */ + {84, H5FL_ARR_OP_ZERO}, /* 8.4% = H5FL_ARR_OP_ZERO */ + {84, H5FL_ARR_OP_FILL1}, /* 8.4% = H5FL_ARR_OP_FILL1 */ + {84, H5FL_ARR_OP_FILL2}, /* 8.4% = H5FL_ARR_OP_FILL2 */ + {84, H5FL_ARR_OP_FILL3}, /* 8.4% = H5FL_ARR_OP_FILL3 */ + {464, H5FL_ARR_OP_FREE}, /* 46.4% = H5FL_ARR_OP_FREE */ +}; + +/* Operation odds when vector is nearly full */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_reg_test_op_odds h5fl_reg_vec_almost_full_ops_odds[] = { + {0, H5FL_REG_OP_MALLOC}, /* 0% = H5FL_REG_OP_MALLOC */ + {0, H5FL_REG_OP_CALLOC}, /* 0% = H5FL_REG_OP_CALLOC */ + {250, H5FL_REG_OP_ZERO}, /* 25% = H5FL_REG_OP_ZERO */ + {250, H5FL_REG_OP_FILL1}, /* 25% = H5FL_REG_OP_FILL1 */ + {250, H5FL_REG_OP_FILL2}, /* 25% = H5FL_REG_OP_FILL2 */ + {250, H5FL_REG_OP_FILL3}, /* 25% = H5FL_REG_OP_FILL3 */ + {0, H5FL_REG_OP_FREE}, /* 0% = H5FL_REG_OP_FREE */ +}; + +/* Operation odds when vector is nearly full */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_fac_test_op_odds h5fl_fac_vec_almost_full_ops_odds[] = { + {0, H5FL_FAC_OP_MALLOC}, /* 0% = H5FL_FAC_OP_MALLOC */ + {0, H5FL_FAC_OP_CALLOC}, /* 0% = H5FL_FAC_OP_CALLOC */ + {250, H5FL_FAC_OP_ZERO}, /* 25% = H5FL_FAC_OP_ZERO */ + {250, H5FL_FAC_OP_FILL1}, /* 25% = H5FL_FAC_OP_FILL1 */ + {250, H5FL_FAC_OP_FILL2}, /* 25% = H5FL_FAC_OP_FILL2 */ + {250, H5FL_FAC_OP_FILL3}, /* 25% = H5FL_FAC_OP_FILL3 */ + {0, H5FL_FAC_OP_FREE}, /* 0% = H5FL_FAC_OP_FREE */ +}; + +/* Operation odds when vector is nearly full */ +/* (Must sum to 1000 (i.e. 
100%) */ +static const h5fl_blk_test_op_odds h5fl_blk_vec_almost_full_ops_odds[] = { + {0, H5FL_BLK_OP_MALLOC}, /* 0% = H5FL_BLK_OP_MALLOC */ + {0, H5FL_BLK_OP_CALLOC}, /* 0% = H5FL_BLK_OP_CALLOC */ + {400, H5FL_BLK_OP_REALLOC}, /* 40% = H5FL_BLK_OP_REALLOC */ + {150, H5FL_BLK_OP_ZERO}, /* 15% = H5FL_BLK_OP_ZERO */ + {150, H5FL_BLK_OP_FILL1}, /* 15% = H5FL_BLK_OP_FILL1 */ + {150, H5FL_BLK_OP_FILL2}, /* 15% = H5FL_BLK_OP_FILL2 */ + {150, H5FL_BLK_OP_FILL3}, /* 15% = H5FL_BLK_OP_FILL3 */ + {0, H5FL_BLK_OP_FREE}, /* 0% = H5FL_BLK_OP_FREE */ +}; + +/* Operation odds when vector is nearly full */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_arr_test_op_odds h5fl_arr_vec_almost_full_ops_odds[] = { + {0, H5FL_ARR_OP_MALLOC}, /* 0% = H5FL_ARR_OP_MALLOC */ + {0, H5FL_ARR_OP_CALLOC}, /* 0% = H5FL_ARR_OP_CALLOC */ + {400, H5FL_ARR_OP_REALLOC}, /* 40% = H5FL_ARR_OP_REALLOC */ + {150, H5FL_ARR_OP_ZERO}, /* 15% = H5FL_ARR_OP_ZERO */ + {150, H5FL_ARR_OP_FILL1}, /* 15% = H5FL_ARR_OP_FILL1 */ + {150, H5FL_ARR_OP_FILL2}, /* 15% = H5FL_ARR_OP_FILL2 */ + {150, H5FL_ARR_OP_FILL3}, /* 15% = H5FL_ARR_OP_FILL3 */ + {0, H5FL_ARR_OP_FREE}, /* 0% = H5FL_ARR_OP_FREE */ +}; + +/* Operation odds when token array is empty */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_reg_test_op_odds h5fl_reg_empty_ops_odds[] = { + {500, H5FL_REG_OP_MALLOC}, /* 50% = H5FL_REG_OP_MALLOC */ + {500, H5FL_REG_OP_CALLOC}, /* 50% = H5FL_REG_OP_CALLOC */ + {0, H5FL_REG_OP_ZERO}, /* 0% = H5FL_REG_OP_ZERO */ + {0, H5FL_REG_OP_FILL1}, /* 0% = H5FL_REG_OP_FILL1 */ + {0, H5FL_REG_OP_FILL2}, /* 0% = H5FL_REG_OP_FILL2 */ + {0, H5FL_REG_OP_FILL3}, /* 0% = H5FL_REG_OP_FILL3 */ + {0, H5FL_REG_OP_FREE}, /* 0% = H5FL_REG_OP_FREE */ +}; + +/* Operation odds when token array is empty */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_fac_test_op_odds h5fl_fac_empty_ops_odds[] = { + {500, H5FL_FAC_OP_MALLOC}, /* 50% = H5FL_FAC_OP_MALLOC */ + {500, H5FL_FAC_OP_CALLOC}, /* 50% = H5FL_FAC_OP_CALLOC */ + {0, H5FL_FAC_OP_ZERO}, /* 0% = H5FL_FAC_OP_ZERO */ + {0, H5FL_FAC_OP_FILL1}, /* 0% = H5FL_FAC_OP_FILL1 */ + {0, H5FL_FAC_OP_FILL2}, /* 0% = H5FL_FAC_OP_FILL2 */ + {0, H5FL_FAC_OP_FILL3}, /* 0% = H5FL_FAC_OP_FILL3 */ + {0, H5FL_FAC_OP_FREE}, /* 0% = H5FL_FAC_OP_FREE */ +}; + +/* Operation odds when token array is empty */ +/* (Must sum to 1000 (i.e. 100%) */ +static const h5fl_blk_test_op_odds h5fl_blk_empty_ops_odds[] = { + {500, H5FL_BLK_OP_MALLOC}, /* 50% = H5FL_BLK_OP_MALLOC */ + {500, H5FL_BLK_OP_CALLOC}, /* 50% = H5FL_BLK_OP_CALLOC */ + {0, H5FL_BLK_OP_REALLOC}, /* 0% = H5FL_BLK_OP_REALLOC */ + {0, H5FL_BLK_OP_ZERO}, /* 0% = H5FL_BLK_OP_ZERO */ + {0, H5FL_BLK_OP_FILL1}, /* 0% = H5FL_BLK_OP_FILL1 */ + {0, H5FL_BLK_OP_FILL2}, /* 0% = H5FL_BLK_OP_FILL2 */ + {0, H5FL_BLK_OP_FILL3}, /* 0% = H5FL_BLK_OP_FILL3 */ + {0, H5FL_BLK_OP_FREE}, /* 0% = H5FL_BLK_OP_FREE */ +}; + +/* Operation odds when token array is empty */ +/* (Must sum to 1000 (i.e. 
100%) */ +static const h5fl_arr_test_op_odds h5fl_arr_empty_ops_odds[] = { + {500, H5FL_ARR_OP_MALLOC}, /* 50% = H5FL_ARR_OP_MALLOC */ + {500, H5FL_ARR_OP_CALLOC}, /* 50% = H5FL_ARR_OP_CALLOC */ + {0, H5FL_ARR_OP_REALLOC}, /* 0% = H5FL_ARR_OP_REALLOC */ + {0, H5FL_ARR_OP_ZERO}, /* 0% = H5FL_ARR_OP_ZERO */ + {0, H5FL_ARR_OP_FILL1}, /* 0% = H5FL_ARR_OP_FILL1 */ + {0, H5FL_ARR_OP_FILL2}, /* 0% = H5FL_ARR_OP_FILL2 */ + {0, H5FL_ARR_OP_FILL3}, /* 0% = H5FL_ARR_OP_FILL3 */ + {0, H5FL_ARR_OP_FREE}, /* 0% = H5FL_ARR_OP_FREE */ +}; + +static unsigned +get_new_h5fl_reg_token(h5fl_reg_test_token *tokens, unsigned *next_token) +{ + unsigned curr_pos = *next_token; + unsigned start_pos = curr_pos; + + do { + /* Check for empty position */ + if (NULL == tokens[curr_pos].val) { + *next_token = (curr_pos + 1) % MAX_TOKENS; + return curr_pos; + } + + curr_pos = (curr_pos + 1) % MAX_TOKENS; + } while (curr_pos != start_pos); + + assert(curr_pos == start_pos && "Can't find empty position for new token"); + abort(); +} + +static unsigned +get_new_h5fl_fac_token(h5fl_fac_test_token *tokens, unsigned *next_token) +{ + unsigned curr_pos = *next_token; + unsigned start_pos = curr_pos; + + do { + /* Check for empty position */ + if (NULL == tokens[curr_pos].val) { + *next_token = (curr_pos + 1) % MAX_TOKENS; + return curr_pos; + } + + curr_pos = (curr_pos + 1) % MAX_TOKENS; + } while (curr_pos != start_pos); + + assert(curr_pos == start_pos && "Can't find empty position for new token"); + abort(); +} + +static unsigned +get_new_h5fl_blk_token(h5fl_blk_test_token *tokens, unsigned *next_token) +{ + unsigned curr_pos = *next_token; + unsigned start_pos = curr_pos; + + do { + /* Check for empty position */ + if (NULL == tokens[curr_pos].val) { + *next_token = (curr_pos + 1) % MAX_TOKENS; + return curr_pos; + } + + curr_pos = (curr_pos + 1) % MAX_TOKENS; + } while (curr_pos != start_pos); + + assert(curr_pos == start_pos && "Can't find empty position for new token"); + abort(); +} + +static unsigned +get_new_h5fl_arr_token(h5fl_arr_test_token *tokens, unsigned *next_token) +{ + unsigned curr_pos = *next_token; + unsigned start_pos = curr_pos; + + do { + /* Check for empty position */ + if (NULL == tokens[curr_pos].val) { + *next_token = (curr_pos + 1) % MAX_TOKENS; + return curr_pos; + } + + curr_pos = (curr_pos + 1) % MAX_TOKENS; + } while (curr_pos != start_pos); + + assert(curr_pos == start_pos && "Can't find empty position for new token"); + abort(); +} + +static h5fl_reg_test_op_code +get_new_h5fl_reg_op(const h5fl_reg_test_op_odds *op_odds) +{ + unsigned idx; + unsigned rng; + + idx = 0; + rng = (unsigned)h5_local_rand() % 1000; + while (0 == op_odds[idx].odds || rng > op_odds[idx].odds) { + rng -= op_odds[idx].odds; + idx++; + } + + return op_odds[idx].op_code; +} + +static h5fl_fac_test_op_code +get_new_h5fl_fac_op(const h5fl_fac_test_op_odds *op_odds) +{ + unsigned idx; + unsigned rng; + + idx = 0; + rng = (unsigned)h5_local_rand() % 1000; + while (0 == op_odds[idx].odds || rng > op_odds[idx].odds) { + rng -= op_odds[idx].odds; + idx++; + } + + return op_odds[idx].op_code; +} + +static h5fl_blk_test_op_code +get_new_h5fl_blk_op(const h5fl_blk_test_op_odds *op_odds) +{ + unsigned idx; + unsigned rng; + + idx = 0; + rng = (unsigned)h5_local_rand() % 1000; + while (0 == op_odds[idx].odds || rng > op_odds[idx].odds) { + rng -= op_odds[idx].odds; + idx++; + } + + return op_odds[idx].op_code; +} + +static h5fl_arr_test_op_code +get_new_h5fl_arr_op(const h5fl_arr_test_op_odds *op_odds) +{ + unsigned idx; + unsigned 
rng; + + idx = 0; + rng = (unsigned)h5_local_rand() % 1000; + while (0 == op_odds[idx].odds || rng > op_odds[idx].odds) { + rng -= op_odds[idx].odds; + idx++; + } + + return op_odds[idx].op_code; +} + +static unsigned +get_active_h5fl_reg_token(h5fl_reg_test_token *tokens, unsigned num_possible_tokens) +{ + unsigned curr_pos; + unsigned start_pos; + + start_pos = curr_pos = (unsigned)h5_local_rand() % num_possible_tokens; + do { + /* Check for active position */ + if (NULL != tokens[curr_pos].val) + return curr_pos; + + curr_pos = (curr_pos + 1) % num_possible_tokens; + } while (curr_pos != start_pos); + + assert(curr_pos == start_pos && "Can't find active token"); + abort(); +} + +static unsigned +get_active_h5fl_fac_token(h5fl_fac_test_token *tokens, unsigned num_possible_tokens) +{ + unsigned curr_pos; + unsigned start_pos; + + start_pos = curr_pos = (unsigned)h5_local_rand() % num_possible_tokens; + do { + /* Check for active position */ + if (NULL != tokens[curr_pos].val) + return curr_pos; + + curr_pos = (curr_pos + 1) % num_possible_tokens; + } while (curr_pos != start_pos); + + assert(curr_pos == start_pos && "Can't find active token"); + abort(); +} + +static unsigned +get_active_h5fl_blk_token(h5fl_blk_test_token *tokens, unsigned num_possible_tokens) +{ + unsigned curr_pos; + unsigned start_pos; + + start_pos = curr_pos = (unsigned)h5_local_rand() % num_possible_tokens; + do { + /* Check for active position */ + if (NULL != tokens[curr_pos].val) + return curr_pos; + + curr_pos = (curr_pos + 1) % num_possible_tokens; + } while (curr_pos != start_pos); + + assert(curr_pos == start_pos && "Can't find active token"); + abort(); +} + +static unsigned +get_active_h5fl_arr_token(h5fl_arr_test_token *tokens, unsigned num_possible_tokens) +{ + unsigned curr_pos; + unsigned start_pos; + + start_pos = curr_pos = (unsigned)h5_local_rand() % num_possible_tokens; + do { + /* Check for active position */ + if (NULL != tokens[curr_pos].val) + return curr_pos; + + curr_pos = (curr_pos + 1) % num_possible_tokens; + } while (curr_pos != start_pos); + + assert(curr_pos == start_pos && "Can't find active token"); + abort(); +} + +#if 0 +static void +print_h5fl_reg_vector(h5fl_reg_test_vector *vector, h5fl_reg_test_token *tokens) +{ + unsigned num_active_tokens = 0; /* # of active tokens at any position in the test vector execution */ + + /* Print test vector */ + for (unsigned u = 0; u < vector->vec_size; u++) { + switch (vector->op_vector[u].op_code) { + case H5FL_REG_OP_MALLOC: + fprintf(stderr, "%04u (%u): H5FL_REG_OP_MALLOC - token: %p, type_idx = %u\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].param.type_idx); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->state = H5FL_REG_ST_UNINIT; + + /* Increment # of active tokens */ + num_active_tokens++; + break; + + case H5FL_REG_OP_CALLOC: + fprintf(stderr, "%04u (%u): H5FL_REG_OP_CALLOC - token: %p, type_idx = %u\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].param.type_idx); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->state = H5FL_REG_ST_ZERO; + + /* Increment # of active tokens */ + num_active_tokens++; + break; + + case H5FL_REG_OP_ZERO: + fprintf(stderr, "%04u (%u): H5FL_REG_OP_ZERO - token: %p (type_idx: %u, state: %u)\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].token->type_idx, 
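/*
 * Illustrative sketch (not part of this patch): the get_new_h5fl_*_op()
 * helpers above walk an odds table whose weights sum to 1000 and return the
 * first entry whose weight covers a random draw in [0, 999], subtracting the
 * weights already passed over.  The same selection loop in a tiny standalone
 * form (demo_* names are illustrative):
 */
typedef struct {
    unsigned odds;    /* Weight out of 1000 */
    int      op_code; /* Value returned when this entry is chosen */
} demo_op_odds;

static int
demo_pick_op(const demo_op_odds *odds, unsigned draw /* 0..999 */)
{
    unsigned idx = 0;

    /* Skip zero-weight entries; otherwise subtract each weight until the draw lands in range */
    while (0 == odds[idx].odds || draw > odds[idx].odds) {
        draw -= odds[idx].odds;
        idx++;
    }

    return odds[idx].op_code;
}

/* e.g. demo_pick_op(table, (unsigned)h5_local_rand() % 1000) mirrors the helpers above */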
vector->op_vector[u].token->state); + vector->op_vector[u].token->state = H5FL_REG_ST_ZERO; + break; + + case H5FL_REG_OP_FILL1: + fprintf(stderr, "%04u (%u): H5FL_REG_OP_FILL1 - token: %p (type_idx: %u, state: %u)\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].token->type_idx, vector->op_vector[u].token->state); + vector->op_vector[u].token->state = H5FL_REG_ST_FILL1; + break; + + case H5FL_REG_OP_FILL2: + fprintf(stderr, "%04u (%u): H5FL_REG_OP_FILL2 - token: %p (type_idx: %u, state: %u)\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].token->type_idx, vector->op_vector[u].token->state); + vector->op_vector[u].token->state = H5FL_REG_ST_FILL2; + break; + + case H5FL_REG_OP_FILL3: + fprintf(stderr, "%04u (%u): H5FL_REG_OP_FILL3 - token: %p (type_idx: %u, state: %u)\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].token->type_idx, vector->op_vector[u].token->state); + vector->op_vector[u].token->state = H5FL_REG_ST_FILL3; + break; + + case H5FL_REG_OP_FREE: + fprintf(stderr, "%04u (%u): H5FL_REG_OP_FREE - token: %p (type_idx: %u, state: %u)\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].token->type_idx, vector->op_vector[u].token->state); + + /* Decrement # of active tokens */ + num_active_tokens--; + break; + + default: + assert (0 && "Invalid op code"); + abort(); + } + } +} + +static void +print_h5fl_fac_vector(h5fl_fac_test_vector *vector, h5fl_fac_test_token *tokens) +{ + unsigned num_active_tokens = 0; /* # of active tokens at any position in the test vector execution */ + + /* Print test vector */ + for (unsigned u = 0; u < vector->vec_size; u++) { + switch (vector->op_vector[u].op_code) { + case H5FL_FAC_OP_MALLOC: + fprintf(stderr, "%04u (%u): H5FL_FAC_OP_MALLOC - token: %p, type_idx = %u\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].param.type_idx); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->state = H5FL_FAC_ST_UNINIT; + + /* Increment # of active tokens */ + num_active_tokens++; + break; + + case H5FL_FAC_OP_CALLOC: + fprintf(stderr, "%04u (%u): H5FL_FAC_OP_CALLOC - token: %p, type_idx = %u\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].param.type_idx); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->state = H5FL_FAC_ST_ZERO; + + /* Increment # of active tokens */ + num_active_tokens++; + break; + + case H5FL_FAC_OP_ZERO: + fprintf(stderr, "%04u (%u): H5FL_FAC_OP_ZERO - token: %p (type_idx: %u, state: %u)\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].token->type_idx, vector->op_vector[u].token->state); + vector->op_vector[u].token->state = H5FL_FAC_ST_ZERO; + break; + + case H5FL_FAC_OP_FILL1: + fprintf(stderr, "%04u (%u): H5FL_FAC_OP_FILL1 - token: %p (type_idx: %u, state: %u)\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].token->type_idx, vector->op_vector[u].token->state); + vector->op_vector[u].token->state = H5FL_FAC_ST_FILL1; + break; + + case H5FL_FAC_OP_FILL2: + fprintf(stderr, "%04u (%u): H5FL_FAC_OP_FILL2 - token: %p (type_idx: %u, state: %u)\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].token->type_idx, vector->op_vector[u].token->state); + vector->op_vector[u].token->state = H5FL_FAC_ST_FILL2; + break; + + case 
H5FL_FAC_OP_FILL3: + fprintf(stderr, "%04u (%u): H5FL_FAC_OP_FILL3 - token: %p (type_idx: %u, state: %u)\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].token->type_idx, vector->op_vector[u].token->state); + vector->op_vector[u].token->state = H5FL_FAC_ST_FILL3; + break; + + case H5FL_FAC_OP_FREE: + fprintf(stderr, "%04u (%u): H5FL_FAC_OP_FREE - token: %p (type_idx: %u, state: %u)\n", u, num_active_tokens, (void *)vector->op_vector[u].token, vector->op_vector[u].token->type_idx, vector->op_vector[u].token->state); + + /* Decrement # of active tokens */ + num_active_tokens--; + break; + + default: + assert (0 && "Invalid op code"); + abort(); + } + } +} +#endif + +static void +init_h5fl_reg_vector(unsigned vec_size, h5fl_reg_test_vector *vector, unsigned num_tokens, + h5fl_reg_test_token *tokens) +{ + unsigned num_active_tokens = 0; /* # of active tokens at any position in the test vector execution */ + unsigned curr_alloc_token; /* Current position for allocating tokens */ + unsigned pos; /* Current position in the test vector */ + bool tokens_wrapped = false; + + /* Allocate the test vector */ + vector->vec_size = vec_size; + vector->op_vector = calloc(vec_size, sizeof(h5fl_reg_test_op)); + CHECK_PTR(vector->op_vector, "calloc"); + + /* Fiil the test vector, leaving room to free active tokens */ + pos = 0; + curr_alloc_token = 0; + while (pos < (vec_size - num_active_tokens)) { + h5fl_reg_test_op_code op_code; + + /* Check for active tokens */ + /* (Also must have enough room for both alloc & free operations) */ + if (0 == num_active_tokens && pos < (vec_size - 2)) + op_code = get_new_h5fl_reg_op(h5fl_reg_empty_ops_odds); + else { + /* Don't create new tokens when there won't be enough room in the + * vector for both the alloc & free operations. + */ + if (pos > ((vec_size - num_active_tokens) - 2)) + op_code = get_new_h5fl_reg_op(h5fl_reg_vec_almost_full_ops_odds); + /* Don't create new tokens when the token array is full */ + else if (num_tokens == num_active_tokens) + op_code = get_new_h5fl_reg_op(h5fl_reg_full_ops_odds); + else + op_code = get_new_h5fl_reg_op(h5fl_reg_all_ops_odds); + } + + /* Set op code */ + vector->op_vector[pos].op_code = op_code; + + /* Set up specific parameters for each op code */ + switch (op_code) { + case H5FL_REG_OP_MALLOC: + case H5FL_REG_OP_CALLOC: { + unsigned prev_alloc_token = curr_alloc_token; + unsigned type_idx; + unsigned new_token; + + /* RNG type to allocate */ + type_idx = (unsigned)h5_local_rand() % (unsigned)NELMTS(h5fl_reg_test_types); + new_token = get_new_h5fl_reg_token(tokens, &curr_alloc_token); + vector->op_vector[pos].token = &tokens[new_token]; + vector->op_vector[pos].param.type_idx = type_idx; + + /* Mark token as used */ + tokens[new_token].val = (void *)(~(uintptr_t)NULL); + + /* Increment # of active tokens */ + num_active_tokens++; + + /* Check for tokens wrapping */ + if (curr_alloc_token < prev_alloc_token) + tokens_wrapped = true; + } break; + + case H5FL_REG_OP_ZERO: + case H5FL_REG_OP_FILL1: + case H5FL_REG_OP_FILL2: + case H5FL_REG_OP_FILL3: + case H5FL_REG_OP_FREE: { + unsigned token_idx; + + token_idx = get_active_h5fl_reg_token(tokens, tokens_wrapped ? 
num_tokens : curr_alloc_token); + vector->op_vector[pos].token = &tokens[token_idx]; + + if (H5FL_REG_OP_FREE == op_code) { + /* Mark token as free */ + tokens[token_idx].val = NULL; + + /* Decrement # of active tokens */ + num_active_tokens--; + } + } break; + + default: + assert(0 && "Invalid op code"); + abort(); + } + + pos++; + } + + /* Fill remainder of test vector with free operations */ + while (pos < vec_size) { + unsigned token_idx; + + /* Set op code */ + vector->op_vector[pos].op_code = H5FL_REG_OP_FREE; + + token_idx = get_active_h5fl_reg_token(tokens, tokens_wrapped ? num_tokens : curr_alloc_token); + vector->op_vector[pos].token = &tokens[token_idx]; + + /* Mark token as free */ + tokens[token_idx].val = NULL; + + /* Decrement # of active tokens */ + num_active_tokens--; + + pos++; + } + + assert(0 == num_active_tokens); +} + +static void +init_h5fl_fac_vector(unsigned vec_size, h5fl_fac_test_vector *vector, unsigned num_tokens, + h5fl_fac_test_token *tokens) +{ + unsigned num_active_tokens = 0; /* # of active tokens at any position in the test vector execution */ + unsigned curr_alloc_token; /* Current position for allocating tokens */ + unsigned pos; /* Current position in the test vector */ + bool tokens_wrapped = false; + + /* Allocate the test vector */ + vector->vec_size = vec_size; + vector->op_vector = calloc(vec_size, sizeof(h5fl_fac_test_op)); + CHECK_PTR(vector->op_vector, "calloc"); + + /* Fiil the test vector, leaving room to free active tokens */ + pos = 0; + curr_alloc_token = 0; + while (pos < (vec_size - num_active_tokens)) { + h5fl_fac_test_op_code op_code; + + /* Check for active tokens */ + /* (Also must have enough room for both alloc & free operations) */ + if (0 == num_active_tokens && pos < (vec_size - 2)) + op_code = get_new_h5fl_fac_op(h5fl_fac_empty_ops_odds); + else { + /* Don't create new tokens when there won't be enough room in the + * vector for both the alloc & free operations. + */ + if (pos > ((vec_size - num_active_tokens) - 2)) + op_code = get_new_h5fl_fac_op(h5fl_fac_vec_almost_full_ops_odds); + /* Don't create new tokens when the token array is full */ + else if (num_tokens == num_active_tokens) + op_code = get_new_h5fl_fac_op(h5fl_fac_full_ops_odds); + else + op_code = get_new_h5fl_fac_op(h5fl_fac_all_ops_odds); + } + + /* Set op code */ + vector->op_vector[pos].op_code = op_code; + + /* Set up specific parameters for each op code */ + switch (op_code) { + case H5FL_FAC_OP_MALLOC: + case H5FL_FAC_OP_CALLOC: { + unsigned prev_alloc_token = curr_alloc_token; + unsigned type_idx; + unsigned new_token; + + /* RNG type to allocate */ + type_idx = (unsigned)h5_local_rand() % (unsigned)NELMTS(h5fl_fac_test_types); + new_token = get_new_h5fl_fac_token(tokens, &curr_alloc_token); + vector->op_vector[pos].token = &tokens[new_token]; + vector->op_vector[pos].param.type_idx = type_idx; + + /* Mark token as used */ + tokens[new_token].val = (void *)(~(uintptr_t)NULL); + + /* Increment # of active tokens */ + num_active_tokens++; + + /* Check for tokens wrapping */ + if (curr_alloc_token < prev_alloc_token) + tokens_wrapped = true; + } break; + + case H5FL_FAC_OP_ZERO: + case H5FL_FAC_OP_FILL1: + case H5FL_FAC_OP_FILL2: + case H5FL_FAC_OP_FILL3: + case H5FL_FAC_OP_FREE: { + unsigned token_idx; + + token_idx = get_active_h5fl_fac_token(tokens, tokens_wrapped ? 
num_tokens : curr_alloc_token); + vector->op_vector[pos].token = &tokens[token_idx]; + + if (H5FL_FAC_OP_FREE == op_code) { + /* Mark token as free */ + tokens[token_idx].val = NULL; + + /* Decrement # of active tokens */ + num_active_tokens--; + } + } break; + + default: + assert(0 && "Invalid op code"); + abort(); + } + + pos++; + } + + /* Fill remainder of test vector with free operations */ + while (pos < vec_size) { + unsigned token_idx; + + /* Set op code */ + vector->op_vector[pos].op_code = H5FL_FAC_OP_FREE; + + token_idx = get_active_h5fl_fac_token(tokens, tokens_wrapped ? num_tokens : curr_alloc_token); + vector->op_vector[pos].token = &tokens[token_idx]; + + /* Mark token as free */ + tokens[token_idx].val = NULL; + + /* Decrement # of active tokens */ + num_active_tokens--; + + pos++; + } + + assert(0 == num_active_tokens); +} + +static void +init_h5fl_blk_vector(unsigned vec_size, h5fl_blk_test_vector *vector, unsigned num_tokens, + h5fl_blk_test_token *tokens) +{ + unsigned num_active_tokens = 0; /* # of active tokens at any position in the test vector execution */ + unsigned curr_alloc_token; /* Current position for allocating tokens */ + unsigned pos; /* Current position in the test vector */ + bool tokens_wrapped = false; + + /* Allocate the test vector */ + vector->vec_size = vec_size; + vector->op_vector = calloc(vec_size, sizeof(h5fl_blk_test_op)); + CHECK_PTR(vector->op_vector, "calloc"); + + /* Fiil the test vector, leaving room to free active tokens */ + pos = 0; + curr_alloc_token = 0; + while (pos < (vec_size - num_active_tokens)) { + h5fl_blk_test_op_code op_code; + + /* Check for active tokens */ + /* (Also must have enough room for both alloc & free operations) */ + if (0 == num_active_tokens && pos < (vec_size - 2)) + op_code = get_new_h5fl_blk_op(h5fl_blk_empty_ops_odds); + else { + /* Don't create new tokens when there won't be enough room in the + * vector for both the alloc & free operations. + */ + if (pos > ((vec_size - num_active_tokens) - 2)) + op_code = get_new_h5fl_blk_op(h5fl_blk_vec_almost_full_ops_odds); + /* Don't create new tokens when the token array is full */ + else if (num_tokens == num_active_tokens) + op_code = get_new_h5fl_blk_op(h5fl_blk_full_ops_odds); + else + op_code = get_new_h5fl_blk_op(h5fl_blk_all_ops_odds); + } + + /* Set op code */ + vector->op_vector[pos].op_code = op_code; + + /* Set up specific parameters for each op code */ + switch (op_code) { + case H5FL_BLK_OP_MALLOC: + case H5FL_BLK_OP_CALLOC: { + unsigned prev_alloc_token = curr_alloc_token; + unsigned type_idx; + unsigned new_token; + + /* RNG type to allocate */ + + type_idx = (unsigned)h5_local_rand() % (unsigned)NELMTS(h5fl_blk_test_types); + new_token = get_new_h5fl_blk_token(tokens, &curr_alloc_token); + vector->op_vector[pos].token = &tokens[new_token]; + vector->op_vector[pos].param.type_idx = type_idx; + + /* Mark token as used */ + tokens[new_token].val = (void *)(~(uintptr_t)NULL); + + /* Increment # of active tokens */ + num_active_tokens++; + + /* Check for tokens wrapping */ + if (curr_alloc_token < prev_alloc_token) + tokens_wrapped = true; + } break; + + case H5FL_BLK_OP_REALLOC: { + unsigned token_idx; + + token_idx = get_active_h5fl_blk_token(tokens, tokens_wrapped ? num_tokens : curr_alloc_token); + vector->op_vector[pos].token = &tokens[token_idx]; + vector->op_vector[pos].param.size_shift = ((unsigned)h5_local_rand() & 0x10) ? 
1 : -1; + } break; + + case H5FL_BLK_OP_ZERO: + case H5FL_BLK_OP_FILL1: + case H5FL_BLK_OP_FILL2: + case H5FL_BLK_OP_FILL3: + case H5FL_BLK_OP_FREE: { + unsigned token_idx; + + token_idx = get_active_h5fl_blk_token(tokens, tokens_wrapped ? num_tokens : curr_alloc_token); + vector->op_vector[pos].token = &tokens[token_idx]; + + if (H5FL_BLK_OP_FREE == op_code) { + /* Mark token as free */ + tokens[token_idx].val = NULL; + + /* Decrement # of active tokens */ + num_active_tokens--; + } + } break; + + default: + assert(0 && "Invalid op code"); + abort(); + } + + pos++; + } + + /* Fill remainder of test vector with free operations */ + while (pos < vec_size) { + unsigned token_idx; + + /* Set op code */ + vector->op_vector[pos].op_code = H5FL_BLK_OP_FREE; + + token_idx = get_active_h5fl_blk_token(tokens, tokens_wrapped ? num_tokens : curr_alloc_token); + vector->op_vector[pos].token = &tokens[token_idx]; + + /* Mark token as free */ + tokens[token_idx].val = NULL; + + /* Decrement # of active tokens */ + num_active_tokens--; + + pos++; + } + + assert(0 == num_active_tokens); +} + +static void +init_h5fl_arr_vector(unsigned vec_size, h5fl_arr_test_vector *vector, unsigned num_tokens, + h5fl_arr_test_token *tokens) +{ + unsigned num_active_tokens = 0; /* # of active tokens at any position in the test vector execution */ + unsigned curr_alloc_token; /* Current position for allocating tokens */ + unsigned pos; /* Current position in the test vector */ + bool tokens_wrapped = false; + + /* Allocate the test vector */ + vector->vec_size = vec_size; + vector->op_vector = calloc(vec_size, sizeof(h5fl_arr_test_op)); + CHECK_PTR(vector->op_vector, "calloc"); + + /* Fiil the test vector, leaving room to free active tokens */ + pos = 0; + curr_alloc_token = 0; + while (pos < (vec_size - num_active_tokens)) { + h5fl_arr_test_op_code op_code; + + /* Check for active tokens */ + /* (Also must have enough room for both alloc & free operations) */ + if (0 == num_active_tokens && pos < (vec_size - 2)) + op_code = get_new_h5fl_arr_op(h5fl_arr_empty_ops_odds); + else { + /* Don't create new tokens when there won't be enough room in the + * vector for both the alloc & free operations. + */ + if (pos > ((vec_size - num_active_tokens) - 2)) + op_code = get_new_h5fl_arr_op(h5fl_arr_vec_almost_full_ops_odds); + /* Don't create new tokens when the token array is full */ + else if (num_tokens == num_active_tokens) + op_code = get_new_h5fl_arr_op(h5fl_arr_full_ops_odds); + else + op_code = get_new_h5fl_arr_op(h5fl_arr_all_ops_odds); + } + + /* Set op code */ + vector->op_vector[pos].op_code = op_code; + + /* Set up specific parameters for each op code */ + switch (op_code) { + case H5FL_ARR_OP_MALLOC: + case H5FL_ARR_OP_CALLOC: { + unsigned prev_alloc_token = curr_alloc_token; + unsigned type_idx; + unsigned new_token; + + /* RNG type to allocate */ + + type_idx = (unsigned)h5_local_rand() % (unsigned)NELMTS(h5fl_arr_test_types); + new_token = get_new_h5fl_arr_token(tokens, &curr_alloc_token); + vector->op_vector[pos].token = &tokens[new_token]; + vector->op_vector[pos].param.type_idx = type_idx; + + /* Mark token as used */ + tokens[new_token].val = (void *)(~(uintptr_t)NULL); + + /* Increment # of active tokens */ + num_active_tokens++; + + /* Check for tokens wrapping */ + if (curr_alloc_token < prev_alloc_token) + tokens_wrapped = true; + } break; + + case H5FL_ARR_OP_REALLOC: { + unsigned token_idx; + + token_idx = get_active_h5fl_arr_token(tokens, tokens_wrapped ? 
num_tokens : curr_alloc_token); + vector->op_vector[pos].token = &tokens[token_idx]; + vector->op_vector[pos].param.rng_size = (unsigned)h5_local_rand(); + } break; + + case H5FL_ARR_OP_ZERO: + case H5FL_ARR_OP_FILL1: + case H5FL_ARR_OP_FILL2: + case H5FL_ARR_OP_FILL3: + case H5FL_ARR_OP_FREE: { + unsigned token_idx; + + token_idx = get_active_h5fl_arr_token(tokens, tokens_wrapped ? num_tokens : curr_alloc_token); + vector->op_vector[pos].token = &tokens[token_idx]; + + if (H5FL_ARR_OP_FREE == op_code) { + /* Mark token as free */ + tokens[token_idx].val = NULL; + + /* Decrement # of active tokens */ + num_active_tokens--; + } + } break; + + default: + assert(0 && "Invalid op code"); + abort(); + } + + pos++; + } + + /* Fill remainder of test vector with free operations */ + while (pos < vec_size) { + unsigned token_idx; + + /* Set op code */ + vector->op_vector[pos].op_code = H5FL_ARR_OP_FREE; + + token_idx = get_active_h5fl_arr_token(tokens, tokens_wrapped ? num_tokens : curr_alloc_token); + vector->op_vector[pos].token = &tokens[token_idx]; + + /* Mark token as free */ + tokens[token_idx].val = NULL; + + /* Decrement # of active tokens */ + num_active_tokens--; + + pos++; + } + + assert(0 == num_active_tokens); +} + +static inline unsigned +validate_h5fl_reg_token(const h5fl_reg_test_token *token) +{ + int v; + + switch (token->state) { + case H5FL_REG_ST_UNINIT: + break; + + case H5FL_REG_ST_ZERO: + v = memcmp(token->val, h5fl_reg_test_types[token->type_idx].zero, + h5fl_reg_test_types[token->type_idx].elmt_size); + VERIFY(v, 0, "H5FL_REG_ST_ZERO"); + if (0 != v) + return (1); + break; + + case H5FL_REG_ST_FILL1: + v = memcmp(token->val, h5fl_reg_test_types[token->type_idx].fill1, + h5fl_reg_test_types[token->type_idx].elmt_size); + VERIFY(v, 0, "H5FL_REG_ST_FILL1"); + if (0 != v) + return (1); + break; + + case H5FL_REG_ST_FILL2: + v = memcmp(token->val, h5fl_reg_test_types[token->type_idx].fill2, + h5fl_reg_test_types[token->type_idx].elmt_size); + VERIFY(v, 0, "H5FL_REG_ST_FILL2"); + if (0 != v) + return (1); + break; + + case H5FL_REG_ST_FILL3: + v = memcmp(token->val, h5fl_reg_test_types[token->type_idx].fill3, + h5fl_reg_test_types[token->type_idx].elmt_size); + VERIFY(v, 0, "H5FL_REG_ST_FILL3"); + if (0 != v) + return (1); + break; + + default: + assert(0 && "Invalid state for token"); + abort(); + } + + return (0); +} + +static inline unsigned +validate_h5fl_fac_token(const h5fl_fac_test_token *token) +{ + int v; + + switch (token->state) { + case H5FL_FAC_ST_UNINIT: + break; + + case H5FL_FAC_ST_ZERO: + v = memcmp(token->val, h5fl_fac_test_types[token->type_idx].zero, + h5fl_fac_test_types[token->type_idx].elmt_size); + VERIFY(v, 0, "H5FL_FAC_ST_ZERO"); + if (0 != v) + return (1); + break; + + case H5FL_FAC_ST_FILL1: + v = memcmp(token->val, h5fl_fac_test_types[token->type_idx].fill1, + h5fl_fac_test_types[token->type_idx].elmt_size); + VERIFY(v, 0, "H5FL_FAC_ST_FILL1"); + if (0 != v) + return (1); + break; + + case H5FL_FAC_ST_FILL2: + v = memcmp(token->val, h5fl_fac_test_types[token->type_idx].fill2, + h5fl_fac_test_types[token->type_idx].elmt_size); + VERIFY(v, 0, "H5FL_FAC_ST_FILL2"); + if (0 != v) + return (1); + break; + + case H5FL_FAC_ST_FILL3: + v = memcmp(token->val, h5fl_fac_test_types[token->type_idx].fill3, + h5fl_fac_test_types[token->type_idx].elmt_size); + VERIFY(v, 0, "H5FL_FAC_ST_FILL3"); + if (0 != v) + return (1); + break; + + default: + assert(0 && "Invalid state for token"); + abort(); + } + + return (0); +} + +static inline unsigned
+validate_h5fl_blk_token(const h5fl_blk_test_token *token) +{ + unsigned u; + + switch (token->state) { + case H5FL_BLK_ST_UNINIT: + break; + + case H5FL_BLK_ST_ZERO: + for (u = 0; u < token->curr_size; u++) { + VERIFY(token->val[u], 0, "H5FL_BLK_ST_ZERO"); + if (0 != token->val[u]) + return (1); + } + break; + + case H5FL_BLK_ST_FILL1: + for (u = 0; u < token->curr_size; u++) { + VERIFY(token->val[u], 1, "H5FL_BLK_ST_FILL1"); + if (1 != token->val[u]) + return (1); + } + break; + + case H5FL_BLK_ST_FILL2: + for (u = 0; u < token->curr_size; u++) { + VERIFY(token->val[u], 2, "H5FL_BLK_ST_FILL2"); + if (2 != token->val[u]) + return (1); + } + break; + + case H5FL_BLK_ST_FILL3: + for (u = 0; u < token->curr_size; u++) { + VERIFY(token->val[u], 3, "H5FL_BLK_ST_FILL3"); + if (3 != token->val[u]) + return (1); + } + break; + + default: + assert(0 && "Invalid state for token"); + abort(); + } + + return (0); +} + +static inline unsigned +validate_h5fl_arr_token(const h5fl_arr_test_token *token) +{ + unsigned char *v = (unsigned char *)token->val; + unsigned u; + + switch (token->state) { + case H5FL_ARR_ST_UNINIT: + break; + + case H5FL_ARR_ST_ZERO: + for (u = 0; u < token->curr_size; u++) { + VERIFY(v[u], 0, "H5FL_ARR_ST_ZERO"); + if (0 != v[u]) + return (1); + } + break; + + case H5FL_ARR_ST_FILL1: + for (u = 0; u < token->curr_size; u++) { + VERIFY(v[u], 1, "H5FL_ARR_ST_FILL1"); + if (1 != v[u]) + return (1); + } + break; + + case H5FL_ARR_ST_FILL2: + for (u = 0; u < token->curr_size; u++) { + VERIFY(v[u], 2, "H5FL_ARR_ST_FILL2"); + if (2 != v[u]) + return (1); + } + break; + + case H5FL_ARR_ST_FILL3: + for (u = 0; u < token->curr_size; u++) { + VERIFY(v[u], 3, "H5FL_ARR_ST_FILL3"); + if (3 != v[u]) + return (1); + } + break; + + default: + assert(0 && "Invalid state for token"); + abort(); + } + + return (0); +} + +static unsigned +run_h5fl_reg_vector(h5fl_reg_test_vector *vector) +{ + /* Execute test vector */ + for (unsigned u = 0; u < vector->vec_size; u++) { + switch (vector->op_vector[u].op_code) { + case H5FL_REG_OP_MALLOC: + vector->op_vector[u].token->val = + H5FL_reg_malloc(h5fl_reg_test_types[vector->op_vector[u].param.type_idx].free_list); + CHECK_PTR(vector->op_vector[u].token->val, "H5FL_reg_malloc"); + if (NULL == vector->op_vector[u].token->val) + return (1); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->state = H5FL_REG_ST_UNINIT; + break; + + case H5FL_REG_OP_CALLOC: + vector->op_vector[u].token->val = + H5FL_reg_calloc(h5fl_reg_test_types[vector->op_vector[u].param.type_idx].free_list); + CHECK_PTR(vector->op_vector[u].token->val, "H5FL_reg_calloc"); + if (NULL == vector->op_vector[u].token->val) + return (1); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->state = H5FL_REG_ST_ZERO; + break; + + case H5FL_REG_OP_ZERO: + if (H5FL_REG_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_reg_token(vector->op_vector[u].token)) + return (1); + if (H5FL_REG_ST_ZERO != vector->op_vector[u].token->state) { + memset(vector->op_vector[u].token->val, 0, + h5fl_reg_test_types[vector->op_vector[u].token->type_idx].elmt_size); + vector->op_vector[u].token->state = H5FL_REG_ST_ZERO; + } + break; + + case H5FL_REG_OP_FILL1: + if (H5FL_REG_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_reg_token(vector->op_vector[u].token)) + return (1); + if (H5FL_REG_ST_FILL1 != vector->op_vector[u].token->state) { + 
memcpy(vector->op_vector[u].token->val, + h5fl_reg_test_types[vector->op_vector[u].token->type_idx].fill1, + h5fl_reg_test_types[vector->op_vector[u].token->type_idx].elmt_size); + vector->op_vector[u].token->state = H5FL_REG_ST_FILL1; + } + break; + + case H5FL_REG_OP_FILL2: + if (H5FL_REG_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_reg_token(vector->op_vector[u].token)) + return (1); + if (H5FL_REG_ST_FILL2 != vector->op_vector[u].token->state) { + memcpy(vector->op_vector[u].token->val, + h5fl_reg_test_types[vector->op_vector[u].token->type_idx].fill2, + h5fl_reg_test_types[vector->op_vector[u].token->type_idx].elmt_size); + vector->op_vector[u].token->state = H5FL_REG_ST_FILL2; + } + break; + + case H5FL_REG_OP_FILL3: + if (H5FL_REG_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_reg_token(vector->op_vector[u].token)) + return (1); + if (H5FL_REG_ST_FILL3 != vector->op_vector[u].token->state) { + memcpy(vector->op_vector[u].token->val, + h5fl_reg_test_types[vector->op_vector[u].token->type_idx].fill3, + h5fl_reg_test_types[vector->op_vector[u].token->type_idx].elmt_size); + vector->op_vector[u].token->state = H5FL_REG_ST_FILL3; + } + break; + + case H5FL_REG_OP_FREE: + if (H5FL_REG_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_reg_token(vector->op_vector[u].token)) + return (1); + H5FL_reg_free(h5fl_reg_test_types[vector->op_vector[u].token->type_idx].free_list, + vector->op_vector[u].token->val); + vector->op_vector[u].token->val = NULL; + break; + + default: + assert(0 && "Invalid op code"); + abort(); + } + } + + return (0); +} + +static unsigned +run_h5fl_fac_vector(h5fl_fac_test_vector *vector) +{ + /* Execute test vector */ + for (unsigned u = 0; u < vector->vec_size; u++) { + switch (vector->op_vector[u].op_code) { + case H5FL_FAC_OP_MALLOC: + vector->op_vector[u].token->val = + H5FL_fac_malloc(h5fl_fac_test_types[vector->op_vector[u].param.type_idx].free_list); + CHECK_PTR(vector->op_vector[u].token->val, "H5FL_fac_malloc"); + if (NULL == vector->op_vector[u].token->val) + return (1); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->state = H5FL_FAC_ST_UNINIT; + break; + + case H5FL_FAC_OP_CALLOC: + vector->op_vector[u].token->val = + H5FL_fac_calloc(h5fl_fac_test_types[vector->op_vector[u].param.type_idx].free_list); + CHECK_PTR(vector->op_vector[u].token->val, "H5FL_fac_calloc"); + if (NULL == vector->op_vector[u].token->val) + return (1); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->state = H5FL_FAC_ST_ZERO; + break; + + case H5FL_FAC_OP_ZERO: + if (H5FL_FAC_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_fac_token(vector->op_vector[u].token)) + return (1); + if (H5FL_FAC_ST_ZERO != vector->op_vector[u].token->state) { + memset(vector->op_vector[u].token->val, 0, + h5fl_fac_test_types[vector->op_vector[u].token->type_idx].elmt_size); + vector->op_vector[u].token->state = H5FL_FAC_ST_ZERO; + } + break; + + case H5FL_FAC_OP_FILL1: + if (H5FL_FAC_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_fac_token(vector->op_vector[u].token)) + return (1); + if (H5FL_FAC_ST_FILL1 != vector->op_vector[u].token->state) { + memcpy(vector->op_vector[u].token->val, + h5fl_fac_test_types[vector->op_vector[u].token->type_idx].fill1, + h5fl_fac_test_types[vector->op_vector[u].token->type_idx].elmt_size); + vector->op_vector[u].token->state = 
H5FL_FAC_ST_FILL1; + } + break; + + case H5FL_FAC_OP_FILL2: + if (H5FL_FAC_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_fac_token(vector->op_vector[u].token)) + return (1); + if (H5FL_FAC_ST_FILL2 != vector->op_vector[u].token->state) { + memcpy(vector->op_vector[u].token->val, + h5fl_fac_test_types[vector->op_vector[u].token->type_idx].fill2, + h5fl_fac_test_types[vector->op_vector[u].token->type_idx].elmt_size); + vector->op_vector[u].token->state = H5FL_FAC_ST_FILL2; + } + break; + + case H5FL_FAC_OP_FILL3: + if (H5FL_FAC_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_fac_token(vector->op_vector[u].token)) + return (1); + if (H5FL_FAC_ST_FILL3 != vector->op_vector[u].token->state) { + memcpy(vector->op_vector[u].token->val, + h5fl_fac_test_types[vector->op_vector[u].token->type_idx].fill3, + h5fl_fac_test_types[vector->op_vector[u].token->type_idx].elmt_size); + vector->op_vector[u].token->state = H5FL_FAC_ST_FILL3; + } + break; + + case H5FL_FAC_OP_FREE: + if (H5FL_FAC_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_fac_token(vector->op_vector[u].token)) + return (1); + H5FL_fac_free(h5fl_fac_test_types[vector->op_vector[u].token->type_idx].free_list, + vector->op_vector[u].token->val); + vector->op_vector[u].token->val = NULL; + break; + + default: + assert(0 && "Invalid op code"); + abort(); + } + } + + return (0); +} + +static void +fill_h5fl_blk_vector(h5fl_blk_test_token *token) +{ + switch (token->state) { + case H5FL_BLK_ST_UNINIT: + break; + + case H5FL_BLK_ST_ZERO: + memset(token->val, 0, token->curr_size); + break; + + case H5FL_BLK_ST_FILL1: + memset(token->val, 1, token->curr_size); + break; + + case H5FL_BLK_ST_FILL2: + memset(token->val, 2, token->curr_size); + break; + + case H5FL_BLK_ST_FILL3: + memset(token->val, 3, token->curr_size); + break; + + default: + assert(0 && "Invalid state for token"); + abort(); + } +} + +static unsigned +run_h5fl_blk_vector(h5fl_blk_test_vector *vector) +{ + /* Execute test vector */ + for (unsigned u = 0; u < vector->vec_size; u++) { + switch (vector->op_vector[u].op_code) { + case H5FL_BLK_OP_MALLOC: + vector->op_vector[u].token->val = + H5FL_blk_malloc(h5fl_blk_test_types[vector->op_vector[u].param.type_idx].free_list, + h5fl_blk_test_types[vector->op_vector[u].param.type_idx].initial_size); + CHECK_PTR(vector->op_vector[u].token->val, "H5FL_blk_malloc"); + if (NULL == vector->op_vector[u].token->val) + return (1); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->curr_size = + h5fl_blk_test_types[vector->op_vector[u].param.type_idx].initial_size; + vector->op_vector[u].token->size_shift = 0; + vector->op_vector[u].token->state = H5FL_BLK_ST_UNINIT; + break; + + case H5FL_BLK_OP_CALLOC: + vector->op_vector[u].token->val = + H5FL_blk_calloc(h5fl_blk_test_types[vector->op_vector[u].param.type_idx].free_list, + h5fl_blk_test_types[vector->op_vector[u].param.type_idx].initial_size); + CHECK_PTR(vector->op_vector[u].token->val, "H5FL_blk_calloc"); + if (NULL == vector->op_vector[u].token->val) + return (1); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->curr_size = + h5fl_blk_test_types[vector->op_vector[u].param.type_idx].initial_size; + vector->op_vector[u].token->size_shift = 0; + vector->op_vector[u].token->state = H5FL_BLK_ST_ZERO; + break; + + case H5FL_BLK_OP_REALLOC: { + size_t new_size; + size_t prev_size; + + /* Choose new size for 
token's buffer */ + vector->op_vector[u].token->size_shift += vector->op_vector[u].param.size_shift; + if (vector->op_vector[u].token->size_shift > 0) + new_size = h5fl_blk_test_types[vector->op_vector[u].token->type_idx].initial_size + << vector->op_vector[u].token->size_shift; + else if (vector->op_vector[u].token->size_shift < 0) { + new_size = h5fl_blk_test_types[vector->op_vector[u].token->type_idx].initial_size >> + (-vector->op_vector[u].token->size_shift); + if (0 == new_size) + new_size = 1; + } + else + new_size = h5fl_blk_test_types[vector->op_vector[u].token->type_idx].initial_size; + + /* Validate current buffer */ + if (H5FL_BLK_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_blk_token(vector->op_vector[u].token)) + return (1); + + /* Reallocate buffer */ + vector->op_vector[u].token->val = + H5FL_blk_realloc(h5fl_blk_test_types[vector->op_vector[u].token->type_idx].free_list, + vector->op_vector[u].token->val, new_size); + CHECK_PTR(vector->op_vector[u].token->val, "H5FL_blk_realloc"); + if (NULL == vector->op_vector[u].token->val) + return (1); + + /* Update size & value for buffer */ + prev_size = vector->op_vector[u].token->curr_size; + vector->op_vector[u].token->curr_size = new_size; + if (new_size > prev_size) + fill_h5fl_blk_vector(vector->op_vector[u].token); + } break; + + case H5FL_BLK_OP_ZERO: + if (H5FL_BLK_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_blk_token(vector->op_vector[u].token)) + return (1); + if (H5FL_BLK_ST_ZERO != vector->op_vector[u].token->state) { + vector->op_vector[u].token->state = H5FL_BLK_ST_ZERO; + memset(vector->op_vector[u].token->val, 0, vector->op_vector[u].token->curr_size); + } + break; + + case H5FL_BLK_OP_FILL1: + if (H5FL_BLK_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_blk_token(vector->op_vector[u].token)) + return (1); + if (H5FL_BLK_ST_FILL1 != vector->op_vector[u].token->state) { + vector->op_vector[u].token->state = H5FL_BLK_ST_FILL1; + memset(vector->op_vector[u].token->val, 1, vector->op_vector[u].token->curr_size); + } + break; + + case H5FL_BLK_OP_FILL2: + if (H5FL_BLK_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_blk_token(vector->op_vector[u].token)) + return (1); + if (H5FL_BLK_ST_FILL2 != vector->op_vector[u].token->state) { + vector->op_vector[u].token->state = H5FL_BLK_ST_FILL2; + memset(vector->op_vector[u].token->val, 2, vector->op_vector[u].token->curr_size); + } + break; + + case H5FL_BLK_OP_FILL3: + if (H5FL_BLK_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_blk_token(vector->op_vector[u].token)) + return (1); + if (H5FL_BLK_ST_FILL3 != vector->op_vector[u].token->state) { + vector->op_vector[u].token->state = H5FL_BLK_ST_FILL3; + memset(vector->op_vector[u].token->val, 3, vector->op_vector[u].token->curr_size); + } + break; + + case H5FL_BLK_OP_FREE: + if (H5FL_BLK_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_blk_token(vector->op_vector[u].token)) + return (1); + H5FL_blk_free(h5fl_blk_test_types[vector->op_vector[u].token->type_idx].free_list, + vector->op_vector[u].token->val); + vector->op_vector[u].token->val = NULL; + break; + + default: + assert(0 && "Invalid op code"); + abort(); + } + } + + return (0); +} + +static void +fill_h5fl_arr_vector(h5fl_arr_test_token *token) +{ + switch (token->state) { + case H5FL_ARR_ST_UNINIT: + break; + + case H5FL_ARR_ST_ZERO: + memset(token->val, 0, token->curr_size); + break; + + case H5FL_ARR_ST_FILL1: + 
memset(token->val, 1, token->curr_size); + break; + + case H5FL_ARR_ST_FILL2: + memset(token->val, 2, token->curr_size); + break; + + case H5FL_ARR_ST_FILL3: + memset(token->val, 3, token->curr_size); + break; + + default: + assert(0 && "Invalid state for token"); + abort(); + } +} + +static unsigned +run_h5fl_arr_vector(h5fl_arr_test_vector *vector) +{ + /* Execute test vector */ + for (unsigned u = 0; u < vector->vec_size; u++) { + switch (vector->op_vector[u].op_code) { + case H5FL_ARR_OP_MALLOC: + vector->op_vector[u].token->val = + H5FL_arr_malloc(h5fl_arr_test_types[vector->op_vector[u].param.type_idx].free_list, + h5fl_arr_test_types[vector->op_vector[u].param.type_idx].max_size); + CHECK_PTR(vector->op_vector[u].token->val, "H5FL_arr_malloc"); + if (NULL == vector->op_vector[u].token->val) + return (1); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->curr_size = + h5fl_arr_test_types[vector->op_vector[u].param.type_idx].max_size; + vector->op_vector[u].token->state = H5FL_ARR_ST_UNINIT; + break; + + case H5FL_ARR_OP_CALLOC: + vector->op_vector[u].token->val = + H5FL_arr_calloc(h5fl_arr_test_types[vector->op_vector[u].param.type_idx].free_list, + h5fl_arr_test_types[vector->op_vector[u].param.type_idx].max_size); + CHECK_PTR(vector->op_vector[u].token->val, "H5FL_arr_calloc"); + if (NULL == vector->op_vector[u].token->val) + return (1); + vector->op_vector[u].token->type_idx = vector->op_vector[u].param.type_idx; + vector->op_vector[u].token->curr_size = + h5fl_arr_test_types[vector->op_vector[u].param.type_idx].max_size; + vector->op_vector[u].token->state = H5FL_ARR_ST_ZERO; + break; + + case H5FL_ARR_OP_REALLOC: { + unsigned new_size; + unsigned prev_size; + + /* Choose new size for token's buffer */ + new_size = 1 + (vector->op_vector[u].param.rng_size % + h5fl_arr_test_types[vector->op_vector[u].token->type_idx].max_size); + + /* Validate current buffer */ + if (H5FL_ARR_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_arr_token(vector->op_vector[u].token)) + return (1); + + /* Reallocate buffer */ + vector->op_vector[u].token->val = + H5FL_arr_realloc(h5fl_arr_test_types[vector->op_vector[u].token->type_idx].free_list, + vector->op_vector[u].token->val, new_size); + CHECK_PTR(vector->op_vector[u].token->val, "H5FL_arr_realloc"); + if (NULL == vector->op_vector[u].token->val) + return (1); + + /* Update size & value for buffer */ + prev_size = vector->op_vector[u].token->curr_size; + vector->op_vector[u].token->curr_size = new_size; + if (new_size > prev_size) + fill_h5fl_arr_vector(vector->op_vector[u].token); + } break; + + case H5FL_ARR_OP_ZERO: + if (H5FL_ARR_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_arr_token(vector->op_vector[u].token)) + return (1); + if (H5FL_ARR_ST_ZERO != vector->op_vector[u].token->state) { + vector->op_vector[u].token->state = H5FL_ARR_ST_ZERO; + memset(vector->op_vector[u].token->val, 0, vector->op_vector[u].token->curr_size); + } + break; + + case H5FL_ARR_OP_FILL1: + if (H5FL_ARR_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_arr_token(vector->op_vector[u].token)) + return (1); + if (H5FL_ARR_ST_FILL1 != vector->op_vector[u].token->state) { + vector->op_vector[u].token->state = H5FL_ARR_ST_FILL1; + memset(vector->op_vector[u].token->val, 1, vector->op_vector[u].token->curr_size); + } + break; + + case H5FL_ARR_OP_FILL2: + if (H5FL_ARR_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != 
validate_h5fl_arr_token(vector->op_vector[u].token)) + return (1); + if (H5FL_ARR_ST_FILL2 != vector->op_vector[u].token->state) { + vector->op_vector[u].token->state = H5FL_ARR_ST_FILL2; + memset(vector->op_vector[u].token->val, 2, vector->op_vector[u].token->curr_size); + } + break; + + case H5FL_ARR_OP_FILL3: + if (H5FL_ARR_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_arr_token(vector->op_vector[u].token)) + return (1); + if (H5FL_ARR_ST_FILL3 != vector->op_vector[u].token->state) { + vector->op_vector[u].token->state = H5FL_ARR_ST_FILL3; + memset(vector->op_vector[u].token->val, 3, vector->op_vector[u].token->curr_size); + } + break; + + case H5FL_ARR_OP_FREE: + if (H5FL_ARR_ST_UNINIT != vector->op_vector[u].token->state) + if (0 != validate_h5fl_arr_token(vector->op_vector[u].token)) + return (1); + H5FL_arr_free(h5fl_arr_test_types[vector->op_vector[u].token->type_idx].free_list, + vector->op_vector[u].token->val); + vector->op_vector[u].token->val = NULL; + break; + + default: + assert(0 && "Invalid op code"); + abort(); + } + } + + return (0); +} + +static H5TS_THREAD_RETURN_TYPE +thread_h5fl_reg(void *_vectors) +{ + h5fl_reg_test_vector *vectors = (h5fl_reg_test_vector *)_vectors; + unsigned errors = 0; + H5TS_THREAD_RETURN_TYPE ret_value = (H5TS_THREAD_RETURN_TYPE)0; + + /* Randomly run a number of vectors */ + for (unsigned u = 0; u < NUM_ITERS_PER_THREAD; u++) { + unsigned rng = (unsigned)h5_local_rand() % NUM_VECTORS; + + /* Run the test vector */ + errors += run_h5fl_reg_vector(&vectors[rng]); + } + + if (errors > 0) + ret_value = (H5TS_THREAD_RETURN_TYPE)1; + + return ret_value; +} + +/* 'regular' H5FL test vectors */ +static h5fl_reg_test_vector *h5fl_reg_vectors[NUM_THREADS]; + +static void +test_h5fl_reg(void) +{ + h5fl_reg_test_token *tokens[NUM_THREADS]; /* Test tokens */ + H5TS_thread_t threads[NUM_THREADS]; + herr_t result; + + /* Output message about test being performed */ + MESSAGE(7, ("Testing 'regular' H5FL operations\n")); + + /* Initialize the zero values for each type */ + for (unsigned u = 0; u < (unsigned)NELMTS(h5fl_reg_test_types); u++) { + h5fl_reg_test_types[u].zero = calloc(1, h5fl_reg_test_types[u].elmt_size); + CHECK_PTR(h5fl_reg_test_types[u].zero, "calloc"); + } + + /* Initialize the fill values for each type to RNG values */ + for (unsigned u = 0; u < (unsigned)NELMTS(h5fl_reg_test_types); u++) { + h5fl_reg_test_types[u].fill1 = malloc(h5fl_reg_test_types[u].elmt_size); + CHECK_PTR(h5fl_reg_test_types[u].fill1, "malloc"); + for (unsigned v = 0; v < h5fl_reg_test_types[u].elmt_size; v++) + h5fl_reg_test_types[u].fill1[v] = (unsigned char)h5_local_rand(); + + h5fl_reg_test_types[u].fill2 = malloc(h5fl_reg_test_types[u].elmt_size); + CHECK_PTR(h5fl_reg_test_types[u].fill2, "malloc"); + for (unsigned v = 0; v < h5fl_reg_test_types[u].elmt_size; v++) + h5fl_reg_test_types[u].fill2[v] = (unsigned char)h5_local_rand(); + + h5fl_reg_test_types[u].fill3 = malloc(h5fl_reg_test_types[u].elmt_size); + CHECK_PTR(h5fl_reg_test_types[u].fill3, "malloc"); + for (unsigned v = 0; v < h5fl_reg_test_types[u].elmt_size; v++) + h5fl_reg_test_types[u].fill3[v] = (unsigned char)h5_local_rand(); + } + + /* Initialize the test vectors */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + /* Allocate the test tokens */ + tokens[u] = calloc(MAX_TOKENS, sizeof(h5fl_reg_test_token)); + CHECK_PTR(tokens[u], "calloc"); + + /* Initialize the test vectors */ + h5fl_reg_vectors[u] = calloc(NUM_VECTORS, sizeof(h5fl_reg_test_vector)); + 
CHECK_PTR(h5fl_reg_vectors[u], "calloc"); + + for (unsigned v = 0; v < NUM_VECTORS; v++) + init_h5fl_reg_vector(NUM_TEST_OPS, &h5fl_reg_vectors[u][v], MAX_TOKENS, tokens[u]); + } + + /* Create threads and have them execute the vector */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + result = H5TS_thread_create(&threads[u], thread_h5fl_reg, h5fl_reg_vectors[u]); + CHECK_I(result, "H5TS_thread_create"); + } + + /* Wait for all threads */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + H5TS_THREAD_RETURN_TYPE thread_ret = (H5TS_THREAD_RETURN_TYPE)0; + + /* Join thread */ + result = H5TS_thread_join(threads[u], &thread_ret); + CHECK_I(result, "H5TS_thread_join"); + + /* Verify no errors from thread */ + VERIFY(thread_ret, (H5TS_THREAD_RETURN_TYPE)0, "error in thread"); + } + + /* Free test vectors & tokens */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + free(tokens[u]); + for (unsigned v = 0; v < NUM_VECTORS; v++) + free(h5fl_reg_vectors[u][v].op_vector); + free(h5fl_reg_vectors[u]); + } + + /* Free the zero fill values for each type */ + for (unsigned u = 0; u < (unsigned)NELMTS(h5fl_reg_test_types); u++) { + free(h5fl_reg_test_types[u].zero); + free(h5fl_reg_test_types[u].fill1); + free(h5fl_reg_test_types[u].fill2); + free(h5fl_reg_test_types[u].fill3); + } +} + +static H5TS_THREAD_RETURN_TYPE +thread_h5fl_fac(void *_vectors) +{ + h5fl_fac_test_vector *vectors = (h5fl_fac_test_vector *)_vectors; + unsigned errors = 0; + H5TS_THREAD_RETURN_TYPE ret_value = (H5TS_THREAD_RETURN_TYPE)0; + + /* Randomly run a number of vectors */ + for (unsigned u = 0; u < NUM_ITERS_PER_THREAD; u++) { + unsigned rng = (unsigned)h5_local_rand() % NUM_VECTORS; + + /* Run the test vector */ + errors += run_h5fl_fac_vector(&vectors[rng]); + } + + if (errors > 0) + ret_value = (H5TS_THREAD_RETURN_TYPE)1; + + return ret_value; +} + +/* 'factory' H5FL test vectors */ +static h5fl_fac_test_vector *h5fl_fac_vectors[NUM_THREADS]; + +static void +test_h5fl_fac(void) +{ + h5fl_fac_test_token *tokens[NUM_THREADS]; /* Test tokens */ + H5TS_thread_t threads[NUM_THREADS]; + herr_t result; + + /* Output message about test being performed */ + MESSAGE(7, ("Testing 'factory' H5FL operations\n")); + + /* Initialize the free list factory for each block size */ + for (unsigned u = 0; u < (unsigned)NELMTS(h5fl_fac_test_types); u++) { + h5fl_fac_test_types[u].free_list = H5FL_fac_init(h5fl_fac_test_types[u].elmt_size); + CHECK_PTR(h5fl_fac_test_types[u].free_list, "H5FL_fac_init"); + } + + /* Initialize the zero values for each type */ + for (unsigned u = 0; u < (unsigned)NELMTS(h5fl_fac_test_types); u++) { + h5fl_fac_test_types[u].zero = calloc(1, h5fl_fac_test_types[u].elmt_size); + CHECK_PTR(h5fl_fac_test_types[u].zero, "calloc"); + } + + /* Initialize the fill values for each type to RNG values */ + for (unsigned u = 0; u < (unsigned)NELMTS(h5fl_fac_test_types); u++) { + h5fl_fac_test_types[u].fill1 = malloc(h5fl_fac_test_types[u].elmt_size); + CHECK_PTR(h5fl_fac_test_types[u].fill1, "malloc"); + for (unsigned v = 0; v < h5fl_fac_test_types[u].elmt_size; v++) + h5fl_fac_test_types[u].fill1[v] = (unsigned char)h5_local_rand(); + + h5fl_fac_test_types[u].fill2 = malloc(h5fl_fac_test_types[u].elmt_size); + CHECK_PTR(h5fl_fac_test_types[u].fill2, "malloc"); + for (unsigned v = 0; v < h5fl_fac_test_types[u].elmt_size; v++) + h5fl_fac_test_types[u].fill2[v] = (unsigned char)h5_local_rand(); + + h5fl_fac_test_types[u].fill3 = malloc(h5fl_fac_test_types[u].elmt_size); + CHECK_PTR(h5fl_fac_test_types[u].fill3, "malloc"); + for 
(unsigned v = 0; v < h5fl_fac_test_types[u].elmt_size; v++) + h5fl_fac_test_types[u].fill3[v] = (unsigned char)h5_local_rand(); + } + + /* Initialize the test vectors */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + /* Allocate the test tokens */ + tokens[u] = calloc(MAX_TOKENS, sizeof(h5fl_fac_test_token)); + CHECK_PTR(tokens[u], "calloc"); + + /* Initialize the test vectors */ + h5fl_fac_vectors[u] = calloc(NUM_VECTORS, sizeof(h5fl_fac_test_vector)); + CHECK_PTR(h5fl_fac_vectors[u], "calloc"); + + for (unsigned v = 0; v < NUM_VECTORS; v++) + init_h5fl_fac_vector(NUM_TEST_OPS, &h5fl_fac_vectors[u][v], MAX_TOKENS, tokens[u]); + } + + /* Create threads and have them execute the vector */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + result = H5TS_thread_create(&threads[u], thread_h5fl_fac, h5fl_fac_vectors[u]); + CHECK_I(result, "H5TS_thread_create"); + } + + /* Wait for all threads */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + H5TS_THREAD_RETURN_TYPE thread_ret = (H5TS_THREAD_RETURN_TYPE)0; + + /* Join thread */ + result = H5TS_thread_join(threads[u], &thread_ret); + CHECK_I(result, "H5TS_thread_join"); + + /* Verify no errors from thread */ + VERIFY(thread_ret, (H5TS_THREAD_RETURN_TYPE)0, "error in thread"); + } + + /* Free test vectors & tokens */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + free(tokens[u]); + for (unsigned v = 0; v < NUM_VECTORS; v++) + free(h5fl_fac_vectors[u][v].op_vector); + free(h5fl_fac_vectors[u]); + } + + /* Release the free list factory for each block size */ + for (unsigned u = 0; u < (unsigned)NELMTS(h5fl_fac_test_types); u++) { + result = H5FL_fac_term(h5fl_fac_test_types[u].free_list); + CHECK_I(result, "H5FL_fac_term"); + } + + /* Free the zero fill values for each type */ + for (unsigned u = 0; u < (unsigned)NELMTS(h5fl_fac_test_types); u++) { + free(h5fl_fac_test_types[u].zero); + free(h5fl_fac_test_types[u].fill1); + free(h5fl_fac_test_types[u].fill2); + free(h5fl_fac_test_types[u].fill3); + } +} + +static H5TS_THREAD_RETURN_TYPE +thread_h5fl_blk(void *_vectors) +{ + h5fl_blk_test_vector *vectors = (h5fl_blk_test_vector *)_vectors; + unsigned errors = 0; + H5TS_THREAD_RETURN_TYPE ret_value = (H5TS_THREAD_RETURN_TYPE)0; + + /* Randomly run a number of vectors */ + for (unsigned u = 0; u < NUM_ITERS_PER_THREAD; u++) { + unsigned rng = (unsigned)h5_local_rand() % NUM_VECTORS; + + /* Run the test vector */ + errors += run_h5fl_blk_vector(&vectors[rng]); + } + + if (errors > 0) + ret_value = (H5TS_THREAD_RETURN_TYPE)1; + + return ret_value; +} + +/* 'block' H5FL test vectors */ +static h5fl_blk_test_vector *h5fl_blk_vectors[NUM_THREADS]; + +static void +test_h5fl_blk(void) +{ + h5fl_blk_test_token *tokens[NUM_THREADS]; /* Test tokens */ + H5TS_thread_t threads[NUM_THREADS]; + herr_t result; + + /* Output message about test being performed */ + MESSAGE(7, ("Testing 'block' H5FL operations\n")); + + /* Initialize the test vectors */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + /* Allocate the test tokens */ + tokens[u] = calloc(MAX_TOKENS, sizeof(h5fl_blk_test_token)); + CHECK_PTR(tokens[u], "calloc"); + + /* Initialize the test vectors */ + h5fl_blk_vectors[u] = calloc(NUM_VECTORS, sizeof(h5fl_blk_test_vector)); + CHECK_PTR(h5fl_blk_vectors[u], "calloc"); + + for (unsigned v = 0; v < NUM_VECTORS; v++) + init_h5fl_blk_vector(NUM_TEST_OPS, &h5fl_blk_vectors[u][v], MAX_TOKENS, tokens[u]); + } + + /* Create threads and have them execute the vector */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + result = H5TS_thread_create(&threads[u], 
thread_h5fl_blk, h5fl_blk_vectors[u]); + CHECK_I(result, "H5TS_thread_create"); + } + + /* Wait for all threads */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + H5TS_THREAD_RETURN_TYPE thread_ret = (H5TS_THREAD_RETURN_TYPE)0; + + /* Join thread */ + result = H5TS_thread_join(threads[u], &thread_ret); + CHECK_I(result, "H5TS_thread_join"); + + /* Verify no errors from thread */ + VERIFY(thread_ret, (H5TS_THREAD_RETURN_TYPE)0, "error in thread"); + } + + /* Free test vectors & tokens */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + free(tokens[u]); + for (unsigned v = 0; v < NUM_VECTORS; v++) + free(h5fl_blk_vectors[u][v].op_vector); + free(h5fl_blk_vectors[u]); + } +} + +static H5TS_THREAD_RETURN_TYPE +thread_h5fl_arr(void *_vectors) +{ + h5fl_arr_test_vector *vectors = (h5fl_arr_test_vector *)_vectors; + unsigned errors = 0; + H5TS_THREAD_RETURN_TYPE ret_value = (H5TS_THREAD_RETURN_TYPE)0; + + /* Randomly run a number of vectors */ + for (unsigned u = 0; u < NUM_ITERS_PER_THREAD; u++) { + unsigned rng = (unsigned)h5_local_rand() % NUM_VECTORS; + + /* Run the test vector */ + errors += run_h5fl_arr_vector(&vectors[rng]); + } + + if (errors > 0) + ret_value = (H5TS_THREAD_RETURN_TYPE)1; + + return ret_value; +} + +/* 'array' H5FL test vectors */ +static h5fl_arr_test_vector *h5fl_arr_vectors[NUM_THREADS]; + +static void +test_h5fl_arr(void) +{ + h5fl_arr_test_token *tokens[NUM_THREADS]; /* Test tokens */ + H5TS_thread_t threads[NUM_THREADS]; + herr_t result; + + /* Output message about test being performed */ + MESSAGE(7, ("Testing 'array' H5FL operations\n")); + + /* Initialize the test vectors */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + /* Allocate the test tokens */ + tokens[u] = calloc(MAX_TOKENS, sizeof(h5fl_arr_test_token)); + CHECK_PTR(tokens[u], "calloc"); + + /* Initialize the test vectors */ + h5fl_arr_vectors[u] = calloc(NUM_VECTORS, sizeof(h5fl_arr_test_vector)); + CHECK_PTR(h5fl_arr_vectors[u], "calloc"); + + for (unsigned v = 0; v < NUM_VECTORS; v++) + init_h5fl_arr_vector(NUM_TEST_OPS, &h5fl_arr_vectors[u][v], MAX_TOKENS, tokens[u]); + } + + /* Create threads and have them execute the vector */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + result = H5TS_thread_create(&threads[u], thread_h5fl_arr, h5fl_arr_vectors[u]); + CHECK_I(result, "H5TS_thread_create"); + } + + /* Wait for all threads */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + H5TS_THREAD_RETURN_TYPE thread_ret = (H5TS_THREAD_RETURN_TYPE)0; + + /* Join thread */ + result = H5TS_thread_join(threads[u], &thread_ret); + CHECK_I(result, "H5TS_thread_join"); + + /* Verify no errors from thread */ + VERIFY(thread_ret, (H5TS_THREAD_RETURN_TYPE)0, "error in thread"); + } + + /* Free test vectors & tokens */ + for (unsigned u = 0; u < NUM_THREADS; u++) { + free(tokens[u]); + for (unsigned v = 0; v < NUM_VECTORS; v++) + free(h5fl_arr_vectors[u][v].op_vector); + free(h5fl_arr_vectors[u]); + } +} + +/* + ********************************************************************** + * Test H5FL package + ********************************************************************** + */ +void +tts_h5fl(void H5_ATTR_UNUSED *params) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing threadsafe H5FL operations\n")); + + /* Set up local RNG */ + h5_setup_local_rand("tts_h5fl", 0); + + /* Run tests */ + test_h5fl_reg(); + test_h5fl_fac(); + test_h5fl_blk(); + test_h5fl_arr(); +} /* end tts_h5fl() */ + +#endif /*H5_HAVE_THREADS*/
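The *_ops_odds tables above express each operation's probability in tenths of a percent (non-zero entries sum to 1000), and the get_new_h5fl_*_op() helpers pick an op by walking the table, subtracting each bucket's odds from a random draw until the draw lands inside a bucket. Below is a minimal standalone sketch of that weighted-selection technique; it uses stdlib rand() and hypothetical names (weight_t, pick_weighted) rather than the test's own types.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical weight table entry: odds are in tenths of a percent and
 * are expected to sum to 1000 across the table, mirroring the *_ops_odds arrays.
 */
typedef struct {
    unsigned odds;
    int      op_code;
} weight_t;

/* Walk the table, subtracting each bucket's odds from the draw until the
 * draw lands inside a bucket; zero-odds buckets are skipped outright.
 */
static int
pick_weighted(const weight_t *table, size_t n)
{
    unsigned draw = (unsigned)rand() % 1000;
    size_t   idx  = 0;

    while (idx < n - 1 && (0 == table[idx].odds || draw > table[idx].odds)) {
        draw -= table[idx].odds;
        idx++;
    }
    return table[idx].op_code;
}

int
main(void)
{
    /* 50% op 0, 30% op 1, 20% op 2 */
    const weight_t table[]   = {{500, 0}, {300, 1}, {200, 2}};
    unsigned       counts[3] = {0, 0, 0};

    for (int i = 0; i < 100000; i++)
        counts[pick_weighted(table, 3)]++;
    printf("%u %u %u\n", counts[0], counts[1], counts[2]);
    return 0;
}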
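The get_new_*_token() and get_active_*_token() helpers are circular linear probes over a fixed slot array: a NULL val marks a free slot, a non-NULL val marks a live one, and the search wraps around from a moving cursor. A small sketch of that probe under the same convention, with hypothetical names (slot_t, find_free_slot) standing in for the token types.

#include <assert.h>
#include <stdlib.h>

#define N_SLOTS 8

/* A slot is "free" when its val pointer is NULL, "live" otherwise. */
typedef struct {
    void *val;
} slot_t;

/* Probe forward from *cursor, wrapping around, until a free slot is found;
 * leave *cursor just past the hit so the next search starts there.
 * Aborts if every slot is live, mirroring the test helpers.
 */
static unsigned
find_free_slot(slot_t *slots, unsigned n, unsigned *cursor)
{
    unsigned pos   = *cursor;
    unsigned start = pos;

    do {
        if (NULL == slots[pos].val) {
            *cursor = (pos + 1) % n;
            return pos;
        }
        pos = (pos + 1) % n;
    } while (pos != start);

    assert(0 && "no free slot");
    abort();
}

int
main(void)
{
    slot_t   slots[N_SLOTS] = {{0}};
    unsigned cursor         = 0;

    /* Claim three slots; each claim marks the slot live. */
    for (int i = 0; i < 3; i++) {
        unsigned idx   = find_free_slot(slots, N_SLOTS, &cursor);
        slots[idx].val = &slots[idx];
    }
    return 0;
}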
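Each token records which pattern (zero, or one of three fill values) was last written into its buffer, and every later operation first re-verifies the whole buffer against that recorded state before mutating it; that record-then-verify step is what detects another thread corrupting free-list memory. A minimal single-buffer sketch of the pattern, with hypothetical names (buf_state_t, tracked_buf_t, verify, touch).

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define BUF_SIZE 32

/* Last pattern written to the buffer; UNINIT means "contents unknown". */
typedef enum { ST_UNINIT, ST_ZERO, ST_FILL1, ST_FILL2, ST_FILL3 } buf_state_t;

/* Byte value that each non-UNINIT state is expected to leave in the buffer. */
static const unsigned char pattern_byte[] = {0 /* unused */, 0, 1, 2, 3};

typedef struct {
    unsigned char buf[BUF_SIZE];
    buf_state_t   state;
} tracked_buf_t;

/* Return nonzero if any byte no longer matches the recorded pattern. */
static int
verify(const tracked_buf_t *t)
{
    if (ST_UNINIT == t->state)
        return 0;
    for (size_t u = 0; u < BUF_SIZE; u++)
        if (t->buf[u] != pattern_byte[t->state])
            return 1;
    return 0;
}

/* Re-validate the current contents, then write and record the next pattern. */
static void
touch(tracked_buf_t *t, buf_state_t next)
{
    assert(0 == verify(t));
    memset(t->buf, pattern_byte[next], BUF_SIZE);
    t->state = next;
}

int
main(void)
{
    tracked_buf_t t = {.state = ST_UNINIT};

    touch(&t, ST_FILL2); /* buffer now all 2s */
    touch(&t, ST_ZERO);  /* 2s verified, then zeroed */
    assert(0 == verify(&t));
    return 0;
}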
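In run_h5fl_blk_vector(), the H5FL_BLK_OP_REALLOC case derives the new buffer size from an accumulated signed size_shift: positive shifts scale the type's initial size up by powers of two, negative shifts scale it down, clamped so the size never reaches zero. A sketch of just that size calculation, using a hypothetical shifted_size() helper.

#include <assert.h>
#include <stddef.h>

/* Scale base by 2^shift: left-shift for positive shifts, right-shift for
 * negative ones, never letting the result drop to zero.
 */
static size_t
shifted_size(size_t base, int shift)
{
    size_t size;

    if (shift > 0)
        size = base << shift;
    else if (shift < 0) {
        size = base >> (-shift);
        if (0 == size)
            size = 1;
    }
    else
        size = base;

    return size;
}

int
main(void)
{
    assert(shifted_size(16, 0) == 16);
    assert(shifted_size(16, 2) == 64);  /* scaled up by 2^2 */
    assert(shifted_size(16, -3) == 2);  /* scaled down by 2^3 */
    assert(shifted_size(16, -10) == 1); /* clamped, never zero */
    return 0;
}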