diff --git a/release_docs/CHANGELOG.md b/release_docs/CHANGELOG.md index d08176bc677..65aa3b30172 100644 --- a/release_docs/CHANGELOG.md +++ b/release_docs/CHANGELOG.md @@ -241,6 +241,8 @@ The Virtual Dataset Global Heap Block format has been updated to version 1 to su Use of the shared strings option for Virtual Datasets reduces memory overhead and optimizes dataset close operations. +The chunked dataset file format has been updated to always use 64 bits to encode the size of filtered chunks. This allows data filters that greatly expand chunks to continue working. Chunk sizes are still limited to `2^32 - 1` bytes. This new format is only used when the lower bound of the HDF5 library version bounds is set to 2.0 or later. + ### The `H5Dread_chunk()` signature has changed A new parameter, `nalloc`, has been added to `H5Dread_chunk()`. This parameter contains a pointer to a variable that holds the size of the buffer buf. If *nalloc is not large enough to hold the entire chunk being read, no data is read. On exit, the value of this variable is set to the buffer size needed to read the chunk. diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c index 4db3e847a47..0587f77b16a 100644 --- a/src/H5Dbtree.c +++ b/src/H5Dbtree.c @@ -41,7 +41,7 @@ /* Local Macros */ /****************/ -#define H5D_BTREE_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.btree.shared) +#define H5D_BTREE_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->layout->storage.u.chunk.u.btree.shared) /******************/ /* Local Typedefs */ /******************/ @@ -819,13 +819,13 @@ H5D__btree_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNUS assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); assert(H5_addr_defined(dset_ohdr_addr)); - idx_info->storage->u.btree.dset_ohdr_addr = dset_ohdr_addr; + idx_info->layout->storage.u.chunk.u.btree.dset_ohdr_addr = dset_ohdr_addr; /* Allocate the shared structure */ - if (H5D__btree_shared_create(idx_info->f, idx_info->storage, idx_info->layout) < 0) + if (H5D__btree_shared_create(idx_info->f, &idx_info->layout->storage.u.chunk, + &idx_info->layout->u.chunk) < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info"); done: @@ -861,15 +861,14 @@ H5D__btree_idx_create(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(!H5_addr_defined(idx_info->storage->idx_addr)); + assert(!H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); /* Initialize "user" data for B-tree callbacks, etc.
*/ - udata.layout = idx_info->layout; - udata.storage = idx_info->storage; + udata.layout = &idx_info->layout->u.chunk; + udata.storage = &idx_info->layout->storage.u.chunk; /* Create the v1 B-tree for the chunk index */ - if (H5B_create(idx_info->f, H5B_BTREE, &udata, &(idx_info->storage->idx_addr) /*out*/) < 0) + if (H5B_create(idx_info->f, H5B_BTREE, &udata, &(idx_info->layout->storage.u.chunk.idx_addr) /*out*/) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create B-tree"); done: @@ -929,8 +928,8 @@ H5D__btree_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open) FUNC_ENTER_PACKAGE_NOERR assert(idx_info); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_BTREE == idx_info->storage->idx_type); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_BTREE == idx_info->layout->storage.u.chunk.idx_type); assert(is_open); *is_open = H5D_BTREE_IDX_IS_OPEN(idx_info); @@ -979,15 +978,14 @@ H5D__btree_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* * Create the chunk it if it doesn't exist, or reallocate the chunk if * its size changed. */ - if (H5B_insert(idx_info->f, H5B_BTREE, idx_info->storage->idx_addr, udata) < 0) + if (H5B_insert(idx_info->f, H5B_BTREE, idx_info->layout->storage.u.chunk.idx_addr, udata) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk"); done: @@ -1017,14 +1015,13 @@ H5D__btree_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udat assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->layout->ndims > 0); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(idx_info->layout->u.chunk.ndims > 0); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* Go get the chunk information from the B-tree */ found = false; - if (H5B_find(idx_info->f, H5B_BTREE, idx_info->storage->idx_addr, &found, udata) < 0) + if (H5B_find(idx_info->f, H5B_BTREE, idx_info->layout->storage.u.chunk.idx_addr, &found, udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "can't check for chunk in B-tree"); done: @@ -1115,20 +1112,19 @@ H5D__btree_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t c assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(chunk_cb); assert(chunk_udata); /* Initialize userdata */ memset(&udata, 0, sizeof udata); - udata.common.layout = idx_info->layout; - udata.common.storage = idx_info->storage; + udata.common.layout = &idx_info->layout->u.chunk; + udata.common.storage = &idx_info->layout->storage.u.chunk; udata.cb = chunk_cb; udata.udata = chunk_udata; /* Iterate over existing chunks */ - if ((ret_value = H5B_iterate(idx_info->f, H5B_BTREE, idx_info->storage->idx_addr, + if ((ret_value = H5B_iterate(idx_info->f, H5B_BTREE, idx_info->layout->storage.u.chunk.idx_addr, H5D__btree_idx_iterate_cb, &udata)) < 0) HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over chunk B-tree"); @@ -1155,14 +1151,13 @@ H5D__btree_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); 
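/*
 * Illustrative sketch tied to the CHANGELOG entry above, for reference only
 * and not part of the diff: creating a filtered, chunked dataset whose chunk
 * sizes use the new 64-bit encoding. The new layout format is only written
 * when the low bound of the library version bounds is 2.0 or later;
 * H5F_LIBVER_LATEST is used here on the assumption that the library is 2.0+.
 * File and dataset names are placeholders; error checking is omitted.
 */
#include "hdf5.h"

static void
create_filtered_chunked_dataset(void)
{
    hsize_t dims[2]  = {1024, 1024};
    hsize_t chunk[2] = {64, 64};
    hid_t   fapl, file, space, dcpl, dset;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    /* A low bound of 2.0 or later selects the new chunked-dataset file format */
    H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);

    file  = H5Fcreate("chunks.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    space = H5Screate_simple(2, dims, NULL);

    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk);
    H5Pset_deflate(dcpl, 6); /* any filter; filtered chunk sizes are now encoded in 64 bits */

    dset = H5Dcreate2(file, "data", H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Pclose(fapl);
    H5Fclose(file);
}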
- assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* Remove the chunk from the v1 B-tree index and release the space for the * chunk (in the B-tree callback). */ - if (H5B_remove(idx_info->f, H5B_BTREE, idx_info->storage->idx_addr, udata) < 0) + if (H5B_remove(idx_info->f, H5B_BTREE, idx_info->layout->storage.u.chunk.idx_addr, udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to remove chunk entry"); done: @@ -1192,23 +1187,22 @@ H5D__btree_idx_delete(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); /* Check if the index data structure has been allocated */ - if (H5_addr_defined(idx_info->storage->idx_addr)) { + if (H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)) { H5O_storage_chunk_t tmp_storage; /* Local copy of storage info */ H5D_chunk_common_ud_t udata; /* User data for B-tree operations */ /* Set up temporary chunked storage info */ - tmp_storage = *idx_info->storage; + tmp_storage = idx_info->layout->storage.u.chunk; /* Set up the shared structure */ - if (H5D__btree_shared_create(idx_info->f, &tmp_storage, idx_info->layout) < 0) + if (H5D__btree_shared_create(idx_info->f, &tmp_storage, &idx_info->layout->u.chunk) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info"); /* Set up B-tree user data */ memset(&udata, 0, sizeof udata); - udata.layout = idx_info->layout; + udata.layout = &idx_info->layout->u.chunk; udata.storage = &tmp_storage; /* Delete entire B-tree */ @@ -1246,25 +1240,25 @@ H5D__btree_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk_ assert(idx_info_src->f); assert(idx_info_src->pline); assert(idx_info_src->layout); - assert(idx_info_src->storage); assert(idx_info_dst); assert(idx_info_dst->f); assert(idx_info_dst->pline); assert(idx_info_dst->layout); - assert(idx_info_dst->storage); - assert(!H5_addr_defined(idx_info_dst->storage->idx_addr)); + assert(!H5_addr_defined(idx_info_dst->layout->storage.u.chunk.idx_addr)); /* Create shared B-tree info for each file */ - if (H5D__btree_shared_create(idx_info_src->f, idx_info_src->storage, idx_info_src->layout) < 0) + if (H5D__btree_shared_create(idx_info_src->f, &idx_info_src->layout->storage.u.chunk, + &idx_info_src->layout->u.chunk) < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for source shared B-tree info"); - if (H5D__btree_shared_create(idx_info_dst->f, idx_info_dst->storage, idx_info_dst->layout) < 0) + if (H5D__btree_shared_create(idx_info_dst->f, &idx_info_dst->layout->storage.u.chunk, + &idx_info_dst->layout->u.chunk) < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for destination shared B-tree info"); /* Create the root of the B-tree that describes chunked storage in the dest. 
file */ if (H5D__btree_idx_create(idx_info_dst) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize chunked storage"); - assert(H5_addr_defined(idx_info_dst->storage->idx_addr)); + assert(H5_addr_defined(idx_info_dst->layout->storage.u.chunk.idx_addr)); done: FUNC_LEAVE_NOAPI_TAG(ret_value) @@ -1323,16 +1317,16 @@ H5D__btree_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); assert(index_size); /* Initialize B-tree node user-data */ memset(&udata, 0, sizeof udata); - udata.layout = idx_info->layout; - udata.storage = idx_info->storage; + udata.layout = &idx_info->layout->u.chunk; + udata.storage = &idx_info->layout->storage.u.chunk; /* Get metadata information for B-tree */ - if (H5B_get_info(idx_info->f, H5B_BTREE, idx_info->storage->idx_addr, &bt_info, NULL, &udata) < 0) + if (H5B_get_info(idx_info->f, H5B_BTREE, idx_info->layout->storage.u.chunk.idx_addr, &bt_info, NULL, + &udata) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to iterate over chunk B-tree"); /* Set the size of the B-tree */ @@ -1408,12 +1402,11 @@ H5D__btree_idx_dest(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); /* Free the raw B-tree node buffer */ - if (NULL == idx_info->storage->u.btree.shared) + if (NULL == idx_info->layout->storage.u.chunk.u.btree.shared) HGOTO_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil"); - if (H5UC_DEC(idx_info->storage->u.btree.shared) < 0) + if (H5UC_DEC(idx_info->layout->storage.u.chunk.u.btree.shared) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page"); done: diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c index c93ee87223f..ab02a2b8ed0 100644 --- a/src/H5Dbtree2.c +++ b/src/H5Dbtree2.c @@ -37,17 +37,40 @@ /* Local Macros */ /****************/ -#define H5D_BT2_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.btree2.bt2) +#define H5D_BT2_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->layout->storage.u.chunk.u.btree2.bt2) + +/* + * Macro to compute the size required for encoding the size of a chunk. For version 4, this is the minimum + * number of bytes required to encode the size of an unfiltered chunk plus an extra byte, in case the filter + * makes the chunk larger. For versions after 4, this is simply the size of lengths for the file. For + * unfiltered chunks, this is 0. 
+ */ +#define H5D_BT2_COMPUTE_CHUNK_SIZE_LEN(chunk_size_len, idx_info) \ + do { \ + if ((idx_info)->pline->nused > 0) { \ + if ((idx_info)->layout->version > H5O_LAYOUT_VERSION_4) \ + (chunk_size_len) = H5F_SIZEOF_SIZE((idx_info)->f); \ + else { \ + (chunk_size_len) = \ + 1 + ((H5VM_log2_gen((uint64_t)(idx_info)->layout->u.chunk.size) + 8) / 8); \ + if ((chunk_size_len) > 8) \ + (chunk_size_len) = 8; \ + } \ + } \ + else \ + (chunk_size_len) = 0; \ + } while (0) /******************/ /* Local Typedefs */ /******************/ /* User data for creating callback context */ typedef struct H5D_bt2_ctx_ud_t { - const H5F_t *f; /* Pointer to file info */ - uint32_t chunk_size; /* Size of chunk (bytes; for filtered object) */ - unsigned ndims; /* Number of dimensions */ - uint32_t *dim; /* Size of chunk in elements */ + const H5F_t *f; /* Pointer to file info */ + uint32_t chunk_size; /* Size of chunk (bytes; for filtered object) */ + unsigned ndims; /* Number of dimensions */ + size_t chunk_size_len; /* Size of chunk sizes in the file (bytes) */ + uint32_t *dim; /* Size of chunk in elements */ } H5D_bt2_ctx_ud_t; /* The callback context */ @@ -236,10 +259,12 @@ H5D__bt2_crt_context(void *_udata) if (NULL == (ctx = H5FL_MALLOC(H5D_bt2_ctx_t))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate callback context"); - /* Determine the size of addresses and set the chunk size and # of dimensions for the dataset */ - ctx->sizeof_addr = H5F_SIZEOF_ADDR(udata->f); - ctx->chunk_size = udata->chunk_size; - ctx->ndims = udata->ndims; + /* Determine the size of addresses and set the chunk size, # of dimensions for the dataset, and bytes used + * to encode the chunk size */ + ctx->sizeof_addr = H5F_SIZEOF_ADDR(udata->f); + ctx->chunk_size = udata->chunk_size; + ctx->ndims = udata->ndims; + ctx->chunk_size_len = udata->chunk_size_len; /* Set up the "local" information for this dataset's chunk dimension sizes */ if (NULL == (my_dim = (uint32_t *)H5FL_ARR_MALLOC(uint32_t, H5O_LAYOUT_NDIMS))) @@ -247,14 +272,6 @@ H5D__bt2_crt_context(void *_udata) H5MM_memcpy(my_dim, udata->dim, H5O_LAYOUT_NDIMS * sizeof(uint32_t)); ctx->dim = my_dim; - /* - * Compute the size required for encoding the size of a chunk, - * allowing for an extra byte, in case the filter makes the chunk larger. 
- */ - ctx->chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)udata->chunk_size) + 8) / 8); - if (ctx->chunk_size_len > 8) - ctx->chunk_size_len = 8; - /* Set return value */ ret_value = ctx; @@ -565,7 +582,7 @@ H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth, const void *_record, c *------------------------------------------------------------------------- */ static herr_t -H5D__bt2_idx_init(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, const H5S_t H5_ATTR_UNUSED *space, +H5D__bt2_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNUSED *space, haddr_t dset_ohdr_addr) { FUNC_ENTER_PACKAGE_NOERR @@ -573,7 +590,7 @@ H5D__bt2_idx_init(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, const H5S_t /* Check args */ assert(H5_addr_defined(dset_ohdr_addr)); - idx_info->storage->u.btree2.dset_ohdr_addr = dset_ohdr_addr; + idx_info->layout->storage.u.chunk.u.btree2.dset_ohdr_addr = dset_ohdr_addr; FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__bt2_idx_init() */ @@ -605,16 +622,15 @@ H5D__btree2_idx_depend(const H5D_chk_idx_info_t *idx_info) assert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE); assert(idx_info->pline); assert(idx_info->layout); - assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_BT2 == idx_info->storage->idx_type); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(idx_info->storage->u.btree2.bt2); + assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->u.chunk.idx_type); + assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->storage.u.chunk.idx_type); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); + assert(idx_info->layout->storage.u.chunk.u.btree2.bt2); /* Set up object header location for dataset */ H5O_loc_reset(&oloc); oloc.file = idx_info->f; - oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr; + oloc.addr = idx_info->layout->storage.u.chunk.u.btree.dset_ohdr_addr; /* Get header */ if (NULL == (oh = H5O_protect(&oloc, H5AC__READ_ONLY_FLAG, true))) @@ -625,7 +641,7 @@ H5D__btree2_idx_depend(const H5D_chk_idx_info_t *idx_info) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get dataset object header proxy"); /* Make the v2 B-tree a child flush dependency of the dataset's object header proxy */ - if (H5B2_depend(idx_info->storage->u.btree2.bt2, oh_proxy) < 0) + if (H5B2_depend(idx_info->layout->storage.u.chunk.u.btree2.bt2, oh_proxy) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header proxy"); @@ -649,9 +665,10 @@ H5D__btree2_idx_depend(const H5D_chk_idx_info_t *idx_info) static herr_t H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info) { - H5B2_create_t bt2_cparam; /* v2 B-tree creation parameters */ - H5D_bt2_ctx_ud_t u_ctx; /* data for context call */ - herr_t ret_value = SUCCEED; /* Return value */ + H5B2_create_t bt2_cparam; /* v2 B-tree creation parameters */ + H5D_bt2_ctx_ud_t u_ctx; /* data for context call */ + unsigned chunk_size_len = 0; /* Size of encoded chunk size */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -660,45 +677,43 @@ H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(!H5_addr_defined(idx_info->storage->idx_addr)); + assert(!H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); - bt2_cparam.rrec_size = H5F_SIZEOF_ADDR(idx_info->f) /* Address of chunk */ - + (idx_info->layout->ndims - 1) * 8; /* # of dimensions x 64-bit chunk offsets */ + /* 
Compute number of bytes used to encode the chunk size */ + H5D_BT2_COMPUTE_CHUNK_SIZE_LEN(chunk_size_len, idx_info); + + /* Set up b-tree creation parameters */ + bt2_cparam.rrec_size = + H5F_SIZEOF_ADDR(idx_info->f) /* Address of chunk */ + + (idx_info->layout->u.chunk.ndims - 1) * 8; /* # of dimensions x 64-bit chunk offsets */ /* General parameters */ if (idx_info->pline->nused > 0) { - unsigned chunk_size_len; /* Size of encoded chunk size */ - - /* - * Compute the size required for encoding the size of a chunk, - * allowing for an extra byte, in case the filter makes the chunk larger. - */ - chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)idx_info->layout->size) + 8) / 8); - if (chunk_size_len > 8) - chunk_size_len = 8; - bt2_cparam.rrec_size += chunk_size_len + 4; /* Size of encoded chunk size & filter mask */ bt2_cparam.cls = H5D_BT2_FILT; } /* end if */ else bt2_cparam.cls = H5D_BT2; - bt2_cparam.node_size = idx_info->layout->u.btree2.cparam.node_size; - bt2_cparam.split_percent = idx_info->layout->u.btree2.cparam.split_percent; - bt2_cparam.merge_percent = idx_info->layout->u.btree2.cparam.merge_percent; + bt2_cparam.node_size = idx_info->layout->u.chunk.u.btree2.cparam.node_size; + bt2_cparam.split_percent = idx_info->layout->u.chunk.u.btree2.cparam.split_percent; + bt2_cparam.merge_percent = idx_info->layout->u.chunk.u.btree2.cparam.merge_percent; - u_ctx.f = idx_info->f; - u_ctx.ndims = idx_info->layout->ndims - 1; - u_ctx.chunk_size = idx_info->layout->size; - u_ctx.dim = idx_info->layout->dim; + /* Set up client context */ + u_ctx.f = idx_info->f; + u_ctx.ndims = idx_info->layout->u.chunk.ndims - 1; + u_ctx.chunk_size = idx_info->layout->u.chunk.size; + u_ctx.dim = idx_info->layout->u.chunk.dim; + u_ctx.chunk_size_len = (size_t)chunk_size_len; /* Create the v2 B-tree for the chunked dataset */ - if (NULL == (idx_info->storage->u.btree2.bt2 = H5B2_create(idx_info->f, &bt2_cparam, &u_ctx))) + if (NULL == + (idx_info->layout->storage.u.chunk.u.btree2.bt2 = H5B2_create(idx_info->f, &bt2_cparam, &u_ctx))) HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create v2 B-tree for tracking chunked dataset"); /* Retrieve the v2 B-tree's address in the file */ - if (H5B2_get_addr(idx_info->storage->u.btree2.bt2, &(idx_info->storage->idx_addr)) < 0) + if (H5B2_get_addr(idx_info->layout->storage.u.chunk.u.btree2.bt2, + &(idx_info->layout->storage.u.chunk.idx_addr)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get v2 B-tree address for tracking chunked dataset"); @@ -741,20 +756,22 @@ H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.btree2.bt2); + assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->u.chunk.idx_type); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); + assert(NULL == idx_info->layout->storage.u.chunk.u.btree2.bt2); /* Set up the user data */ u_ctx.f = idx_info->f; - u_ctx.ndims = idx_info->layout->ndims - 1; - u_ctx.chunk_size = idx_info->layout->size; - u_ctx.dim = idx_info->layout->dim; + u_ctx.ndims = idx_info->layout->u.chunk.ndims - 1; + u_ctx.chunk_size = idx_info->layout->u.chunk.size; + u_ctx.dim = idx_info->layout->u.chunk.dim; + + /* Compute number of bytes used to encode the chunk size */ + H5D_BT2_COMPUTE_CHUNK_SIZE_LEN(u_ctx.chunk_size_len, idx_info); /* Open v2 B-tree for the 
chunk index */ - if (NULL == - (idx_info->storage->u.btree2.bt2 = H5B2_open(idx_info->f, idx_info->storage->idx_addr, &u_ctx))) + if (NULL == (idx_info->layout->storage.u.chunk.u.btree2.bt2 = + H5B2_open(idx_info->f, idx_info->layout->storage.u.chunk.idx_addr, &u_ctx))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open v2 B-tree for tracking chunked dataset"); /* Check for SWMR writes to the file */ @@ -785,13 +802,13 @@ H5D__bt2_idx_close(const H5D_chk_idx_info_t *idx_info) FUNC_ENTER_PACKAGE assert(idx_info); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_BT2 == idx_info->storage->idx_type); - assert(idx_info->storage->u.btree2.bt2); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->storage.u.chunk.idx_type); + assert(idx_info->layout->storage.u.chunk.u.btree2.bt2); - if (H5B2_close(idx_info->storage->u.btree2.bt2) < 0) + if (H5B2_close(idx_info->layout->storage.u.chunk.u.btree2.bt2) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close v2 B-tree"); - idx_info->storage->u.btree2.bt2 = NULL; + idx_info->layout->storage.u.chunk.u.btree2.bt2 = NULL; done: FUNC_LEAVE_NOAPI(ret_value) @@ -812,8 +829,8 @@ H5D__bt2_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open) FUNC_ENTER_PACKAGE_NOERR assert(idx_info); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_BT2 == idx_info->storage->idx_type); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->storage.u.chunk.idx_type); assert(is_open); *is_open = H5D_BT2_IDX_IS_OPEN(idx_info); @@ -915,8 +932,7 @@ H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); assert(H5_addr_defined(udata->chunk_block.offset)); @@ -927,24 +943,24 @@ H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); } /* end if */ else /* Patch the top level file pointer contained in bt2 if needed */ - if (H5B2_patch_file(idx_info->storage->u.btree2.bt2, idx_info->f) < 0) + if (H5B2_patch_file(idx_info->layout->storage.u.chunk.u.btree2.bt2, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch v2 B-tree file pointer"); /* Set convenience pointer to v2 B-tree structure */ - bt2 = idx_info->storage->u.btree2.bt2; + bt2 = idx_info->layout->storage.u.chunk.u.btree2.bt2; /* Set up callback info */ - bt2_udata.ndims = idx_info->layout->ndims - 1; + bt2_udata.ndims = idx_info->layout->u.chunk.ndims - 1; bt2_udata.rec.chunk_addr = udata->chunk_block.offset; if (idx_info->pline->nused > 0) { /* filtered chunk */ H5_CHECKED_ASSIGN(bt2_udata.rec.nbytes, uint32_t, udata->chunk_block.length, hsize_t); bt2_udata.rec.filter_mask = udata->filter_mask; } /* end if */ else { /* non-filtered chunk */ - bt2_udata.rec.nbytes = idx_info->layout->size; + bt2_udata.rec.nbytes = idx_info->layout->u.chunk.size; bt2_udata.rec.filter_mask = 0; } /* end else */ - for (u = 0; u < (idx_info->layout->ndims - 1); u++) + for (u = 0; u < (idx_info->layout->u.chunk.ndims - 1); u++) bt2_udata.rec.scaled[u] = udata->common.scaled[u]; /* Update record for v2 B-tree (could be insert or modify) */ @@ -1005,9 +1021,8 @@ H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) assert(idx_info->f); assert(idx_info->pline); 
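/*
 * For reference: the H5D_BT2_COMPUTE_CHUNK_SIZE_LEN macro above (and the
 * H5D_EARRAY_* counterpart later in this patch) boils down to the rule
 * sketched below. This standalone version takes plain parameters in place of
 * the library-internal idx_info, H5F_SIZEOF_SIZE() and H5VM_log2_gen()
 * helpers, and assumes a non-zero chunk size.
 */
#include <stddef.h>
#include <stdint.h>

static size_t
encoded_chunk_size_len(unsigned layout_version, size_t nfilters,
                       uint32_t chunk_size, size_t file_sizeof_lengths)
{
    size_t len;

    if (nfilters == 0)
        return 0; /* unfiltered chunks: the size is implicit, nothing is encoded */

    if (layout_version > 4)             /* H5O_LAYOUT_VERSION_4 */
        return file_sizeof_lengths;     /* new format: file's "size of lengths", typically 8 */

    /* Old format: minimum bytes needed for the unfiltered chunk size, plus one
     * spare byte in case a filter expands the chunk, capped at 8 bytes. */
    len = 1;
    while ((chunk_size >>= 8) != 0)
        len++;
    len++;
    if (len > 8)
        len = 8;

    return len;
}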
assert(idx_info->layout); - assert(idx_info->layout->ndims > 0); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(idx_info->layout->u.chunk.ndims > 0); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* Check if the v2 B-tree is open yet */ @@ -1017,11 +1032,11 @@ H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); } /* end if */ else /* Patch the top level file pointer contained in bt2 if needed */ - if (H5B2_patch_file(idx_info->storage->u.btree2.bt2, idx_info->f) < 0) + if (H5B2_patch_file(idx_info->layout->storage.u.chunk.u.btree2.bt2, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch v2 B-tree file pointer"); /* Set convenience pointer to v2 B-tree structure */ - bt2 = idx_info->storage->u.btree2.bt2; + bt2 = idx_info->layout->storage.u.chunk.u.btree2.bt2; /* Clear the found record */ found_rec.chunk_addr = HADDR_UNDEF; @@ -1030,10 +1045,10 @@ H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) /* Prepare user data for compare callback */ bt2_udata.rec.chunk_addr = HADDR_UNDEF; - bt2_udata.ndims = idx_info->layout->ndims - 1; + bt2_udata.ndims = idx_info->layout->u.chunk.ndims - 1; /* Set the chunk offset to be searched for */ - for (u = 0; u < (idx_info->layout->ndims - 1); u++) + for (u = 0; u < (idx_info->layout->u.chunk.ndims - 1); u++) bt2_udata.rec.scaled[u] = udata->common.scaled[u]; /* Go get chunk information from v2 B-tree */ @@ -1055,7 +1070,7 @@ H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) udata->filter_mask = found_rec.filter_mask; } /* end if */ else { /* non-filtered chunk */ - udata->chunk_block.length = idx_info->layout->size; + udata->chunk_block.length = idx_info->layout->u.chunk.size; udata->filter_mask = 0; } /* end else */ } /* end if */ @@ -1080,7 +1095,7 @@ H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) *------------------------------------------------------------------------- */ static herr_t -H5D__bt2_idx_load_metadata(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +H5D__bt2_idx_load_metadata(const H5D_chk_idx_info_t *idx_info) { H5D_chunk_ud_t chunk_ud; hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; @@ -1095,8 +1110,8 @@ H5D__bt2_idx_load_metadata(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) * a good way of controlling that explicitly, perform a fake * lookup of a chunk to cause it to be read in. 
*/ - chunk_ud.common.layout = idx_info->layout; - chunk_ud.common.storage = idx_info->storage; + chunk_ud.common.layout = &idx_info->layout->u.chunk; + chunk_ud.common.storage = &idx_info->layout->storage.u.chunk; chunk_ud.common.scaled = scaled; chunk_ud.chunk_block.offset = HADDR_UNDEF; @@ -1166,8 +1181,7 @@ H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chu assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(chunk_cb); assert(chunk_udata); @@ -1178,11 +1192,11 @@ H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chu HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); } /* end if */ else /* Patch the top level file pointer contained in bt2 if needed */ - if (H5B2_patch_file(idx_info->storage->u.btree2.bt2, idx_info->f) < 0) + if (H5B2_patch_file(idx_info->layout->storage.u.chunk.u.btree2.bt2, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch v2 B-tree file pointer"); /* Set convenience pointer to v2 B-tree structure */ - bt2 = idx_info->storage->u.btree2.bt2; + bt2 = idx_info->layout->storage.u.chunk.u.btree2.bt2; /* Prepare user data for iterate callback */ udata.cb = chunk_cb; @@ -1255,8 +1269,7 @@ H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *u assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* Check if the v2 B-tree is open yet */ @@ -1266,17 +1279,17 @@ H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *u HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); } /* end if */ else /* Patch the top level file pointer contained in bt2 if needed */ - if (H5B2_patch_file(idx_info->storage->u.btree2.bt2, idx_info->f) < 0) + if (H5B2_patch_file(idx_info->layout->storage.u.chunk.u.btree2.bt2, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch v2 B-tree file pointer"); /* Set convenience pointer to v2 B-tree structure */ - bt2 = idx_info->storage->u.btree2.bt2; + bt2 = idx_info->layout->storage.u.chunk.u.btree2.bt2; /* Prepare user data for compare callback */ - bt2_udata.ndims = idx_info->layout->ndims - 1; + bt2_udata.ndims = idx_info->layout->u.chunk.ndims - 1; /* Initialize the record to search for */ - for (u = 0; u < (idx_info->layout->ndims - 1); u++) + for (u = 0; u < (idx_info->layout->u.chunk.ndims - 1); u++) bt2_udata.rec.scaled[u] = udata->scaled[u]; /* Remove the record for the "dataset chunk" object from the v2 B-tree */ @@ -1315,15 +1328,17 @@ H5D__bt2_idx_delete(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); /* Check if the index data structure has been allocated */ - if (H5_addr_defined(idx_info->storage->idx_addr)) { + if (H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)) { /* Set up user data for creating context */ u_ctx.f = idx_info->f; - u_ctx.ndims = idx_info->layout->ndims - 1; - u_ctx.chunk_size = idx_info->layout->size; - u_ctx.dim = idx_info->layout->dim; + u_ctx.ndims = idx_info->layout->u.chunk.ndims - 1; + u_ctx.chunk_size = idx_info->layout->u.chunk.size; + u_ctx.dim = 
idx_info->layout->u.chunk.dim; + + /* Compute number of bytes used to encode the chunk size */ + H5D_BT2_COMPUTE_CHUNK_SIZE_LEN(u_ctx.chunk_size_len, idx_info); /* Set remove operation. Do not remove chunks in SWMR_WRITE mode */ if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) @@ -1333,10 +1348,11 @@ H5D__bt2_idx_delete(const H5D_chk_idx_info_t *idx_info) /* Delete the v2 B-tree */ /*(space in the file for each object is freed in the 'remove' callback) */ - if (H5B2_delete(idx_info->f, idx_info->storage->idx_addr, &u_ctx, remove_op, idx_info->f) < 0) + if (H5B2_delete(idx_info->f, idx_info->layout->storage.u.chunk.idx_addr, &u_ctx, remove_op, + idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree"); - idx_info->storage->idx_addr = HADDR_UNDEF; + idx_info->layout->storage.u.chunk.idx_addr = HADDR_UNDEF; } /* end if */ done: @@ -1364,15 +1380,13 @@ H5D__bt2_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk_id assert(idx_info_src->f); assert(idx_info_src->pline); assert(idx_info_src->layout); - assert(idx_info_src->storage); /* Destination file */ assert(idx_info_dst); assert(idx_info_dst->f); assert(idx_info_dst->pline); assert(idx_info_dst->layout); - assert(idx_info_dst->storage); - assert(!H5_addr_defined(idx_info_dst->storage->idx_addr)); + assert(!H5_addr_defined(idx_info_dst->layout->storage.u.chunk.idx_addr)); /* Check if the source v2 B-tree is open yet */ if (!H5D_BT2_IDX_IS_OPEN(idx_info_src)) @@ -1385,7 +1399,7 @@ H5D__bt2_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk_id /* Create v2 B-tree that describes the chunked dataset in the destination file */ if (H5D__bt2_idx_create(idx_info_dst) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage"); - assert(H5_addr_defined(idx_info_dst->storage->idx_addr)); + assert(H5_addr_defined(idx_info_dst->layout->storage.u.chunk.idx_addr)); /* Reset metadata tag */ H5_END_TAG @@ -1453,8 +1467,7 @@ H5D__bt2_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(index_size); /* Open v2 B-tree */ @@ -1462,7 +1475,7 @@ H5D__bt2_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); /* Set convenience pointer to v2 B-tree structure */ - bt2_cdset = idx_info->storage->u.btree2.bt2; + bt2_cdset = idx_info->layout->storage.u.chunk.u.btree2.bt2; /* Get v2 B-tree size for indexing chunked dataset */ if (H5B2_size(bt2_cdset, index_size) < 0) @@ -1544,12 +1557,12 @@ H5D__bt2_idx_dest(const H5D_chk_idx_info_t *idx_info) /* Check args */ assert(idx_info); assert(idx_info->f); - assert(idx_info->storage); + assert(idx_info->layout); /* Check if the v2-btree is open */ if (H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Patch the top level file pointer contained in bt2 if needed */ - if (H5B2_patch_file(idx_info->storage->u.btree2.bt2, idx_info->f) < 0) + if (H5B2_patch_file(idx_info->layout->storage.u.chunk.u.btree2.bt2, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch v2 B-tree file pointer"); /* Close v2 B-tree */ diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 876e1e8bf13..f291fcb47b3 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -420,10 +420,9 @@ H5D__chunk_direct_write(H5D_t *dset, uint32_t 
filters, hsize_t *offset, uint32_t */ /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &(dset->shared->dcpl_cache.pline); - idx_info.layout = &(dset->shared->layout.u.chunk); - idx_info.storage = &(dset->shared->layout.storage.u.chunk); + idx_info.f = dset->oloc.file; + idx_info.pline = &(dset->shared->dcpl_cache.pline); + idx_info.layout = &(dset->shared->layout); /* Set up the size of chunk for user data */ udata.chunk_block.length = data_size; @@ -958,10 +957,9 @@ H5D__chunk_init(H5F_t *f, const H5D_t *const dset, hid_t dapl_id) } /* end if */ /* Compose chunked index info struct */ - idx_info.f = f; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = sc; + idx_info.f = f; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* Allocate any indexing structures */ if (sc->ops->init && (sc->ops->init)(&idx_info, dset->shared->space, dset->oloc.addr) < 0) @@ -1142,10 +1140,9 @@ H5D__chunk_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) H5D_chk_idx_info_t idx_info; bool index_is_open; - idx_info.f = dataset->oloc.file; - idx_info.pline = &dataset->shared->dcpl_cache.pline; - idx_info.layout = &dataset->shared->layout.u.chunk; - idx_info.storage = sc; + idx_info.f = dataset->oloc.file; + idx_info.pline = &dataset->shared->dcpl_cache.pline; + idx_info.layout = &dataset->shared->layout; assert(sc && sc->ops && sc->ops->is_open); if (sc->ops->is_open(&idx_info, &index_is_open) < 0) @@ -3159,10 +3156,9 @@ H5D__chunk_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) /* If the chunk hasn't been allocated on disk, do so now. */ if (!H5_addr_defined(udata.chunk_block.offset)) { /* Compose chunked index info struct */ - idx_info.f = dset_info->dset->oloc.file; - idx_info.pline = &(dset_info->dset->shared->dcpl_cache.pline); - idx_info.layout = &(dset_info->dset->shared->layout.u.chunk); - idx_info.storage = &(dset_info->dset->shared->layout.storage.u.chunk); + idx_info.f = dset_info->dset->oloc.file; + idx_info.pline = &(dset_info->dset->shared->dcpl_cache.pline); + idx_info.layout = &(dset_info->dset->shared->layout); /* Set up the size of chunk for user data */ udata.chunk_block.length = dset_info->dset->shared->layout.u.chunk.size; @@ -3311,10 +3307,9 @@ H5D__chunk_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) /* If the chunk hasn't been allocated on disk, do so now. 
*/ if (!H5_addr_defined(udata.chunk_block.offset)) { /* Compose chunked index info struct */ - idx_info.f = dset_info->dset->oloc.file; - idx_info.pline = &(dset_info->dset->shared->dcpl_cache.pline); - idx_info.layout = &(dset_info->dset->shared->layout.u.chunk); - idx_info.storage = &(dset_info->dset->shared->layout.storage.u.chunk); + idx_info.f = dset_info->dset->oloc.file; + idx_info.pline = &(dset_info->dset->shared->dcpl_cache.pline); + idx_info.layout = &(dset_info->dset->shared->layout); /* Set up the size of chunk for user data */ udata.chunk_block.length = dset_info->dset->shared->layout.u.chunk.size; @@ -3529,10 +3524,9 @@ H5D__chunk_dest(H5D_t *dset) memset(rdcc, 0, sizeof(H5D_rdcc_t)); /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = sc; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* Free any index structures */ if (sc->ops->dest && (sc->ops->dest)(&idx_info) < 0) @@ -3709,10 +3703,9 @@ H5D__chunk_create(const H5D_t *dset /*in,out*/) #endif /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = sc; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* Create the index for the chunks */ if ((sc->ops->create)(&idx_info) < 0) @@ -3843,10 +3836,9 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled, H5D_chunk_ud_t *udat H5D_chk_idx_info_t idx_info; /* Chunked index info */ /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = sc; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; #ifdef H5_HAVE_PARALLEL if (H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI)) { @@ -4042,10 +4034,9 @@ H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, bool reset) */ if (must_alloc) { /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = sc; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* Create the chunk it if it doesn't exist, or reallocate the chunk * if its size changed. 
@@ -4873,10 +4864,9 @@ H5D__chunk_allocated(const H5D_t *dset, hsize_t *nbytes) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer"); /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = sc; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* Iterate over the chunks */ if ((sc->ops->iterate)(&idx_info, H5D__chunk_allocated_cb, &chunk_bytes) < 0) @@ -5074,10 +5064,9 @@ H5D__chunk_allocate(const H5D_t *dset, bool full_overwrite, const hsize_t old_di } /* end if */ /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = sc; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* Loop over all chunks */ /* The algorithm is: @@ -6061,10 +6050,9 @@ H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim) chk_io_info.count = 1; /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = &dset->shared->layout.storage.u.chunk; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* Initialize the user data for the iteration */ memset(&udata, 0, sizeof udata); @@ -6368,10 +6356,9 @@ H5D__chunk_addrmap(const H5D_t *dset, haddr_t chunk_addr[]) udata.chunk_addr = chunk_addr; /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = sc; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* Iterate over chunks to build mapping of chunk addresses */ if ((sc->ops->iterate)(&idx_info, H5D__chunk_addrmap_cb, &udata) < 0) @@ -6394,11 +6381,9 @@ H5D__chunk_addrmap(const H5D_t *dset, haddr_t chunk_addr[]) *------------------------------------------------------------------------- */ herr_t -H5D__chunk_delete(H5F_t *f, H5O_t *oh, H5O_storage_t *storage) +H5D__chunk_delete(H5F_t *f, H5O_t *oh, H5O_layout_t *layout) { H5D_chk_idx_info_t idx_info; /* Chunked index info */ - H5O_layout_t layout; /* Dataset layout message */ - bool layout_read = false; /* Whether the layout message was read from the file */ H5O_pline_t pline; /* I/O pipeline message */ bool pline_read = false; /* Whether the I/O pipeline message was read from the file */ htri_t exists; /* Flag if header message of interest exists */ @@ -6409,8 +6394,8 @@ H5D__chunk_delete(H5F_t *f, H5O_t *oh, H5O_storage_t *storage) /* Sanity check */ assert(f); assert(oh); - assert(storage); - H5D_CHUNK_STORAGE_INDEX_CHK(&storage->u.chunk); + assert(layout); + H5D_CHUNK_STORAGE_INDEX_CHK(&layout->storage.u.chunk); /* Check for I/O pipeline message */ if ((exists = H5O_msg_exists_oh(oh, H5O_PLINE_ID)) < 0) @@ -6423,25 +6408,13 @@ H5D__chunk_delete(H5F_t *f, H5O_t *oh, H5O_storage_t *storage) else memset(&pline, 0, sizeof(pline)); - /* Retrieve dataset layout message */ - if ((exists = H5O_msg_exists_oh(oh, H5O_LAYOUT_ID)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to check 
for object header message"); - else if (exists) { - if (NULL == H5O_msg_read_oh(f, oh, H5O_LAYOUT_ID, &layout)) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get layout message"); - layout_read = true; - } /* end else if */ - else - HGOTO_ERROR(H5E_DATASET, H5E_NOTFOUND, FAIL, "can't find layout message"); - /* Compose chunked index info struct */ - idx_info.f = f; - idx_info.pline = &pline; - idx_info.layout = &layout.u.chunk; - idx_info.storage = &storage->u.chunk; + idx_info.f = f; + idx_info.pline = &pline; + idx_info.layout = layout; /* Delete the chunked storage information in the file */ - if ((storage->u.chunk.ops->idx_delete)(&idx_info) < 0) + if ((layout->storage.u.chunk.ops->idx_delete)(&idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to delete chunk index"); done: @@ -6449,9 +6422,6 @@ H5D__chunk_delete(H5F_t *f, H5O_t *oh, H5O_storage_t *storage) if (pline_read) if (H5O_msg_reset(H5O_PLINE_ID, &pline) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, FAIL, "unable to reset I/O pipeline message"); - if (layout_read) - if (H5O_msg_reset(H5O_LAYOUT_ID, &layout) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, FAIL, "unable to reset layout message"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_delete() */ @@ -6755,8 +6725,8 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) } /* end if */ /* Set up destination chunk callback information for insertion */ - udata_dst.common.layout = udata->idx_info_dst->layout; - udata_dst.common.storage = udata->idx_info_dst->storage; + udata_dst.common.layout = &udata->idx_info_dst->layout->u.chunk; + udata_dst.common.storage = &udata->idx_info_dst->layout->storage.u.chunk; udata_dst.common.scaled = chunk_rec->scaled; udata_dst.chunk_block.offset = HADDR_UNDEF; udata_dst.chunk_block.length = chunk_rec->nbytes; @@ -6798,8 +6768,9 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) H5_BEGIN_TAG(H5AC__COPIED_TAG) /* Insert chunk record into index */ - if (need_insert && udata->idx_info_dst->storage->ops->insert) - if ((udata->idx_info_dst->storage->ops->insert)(udata->idx_info_dst, &udata_dst, NULL) < 0) + if (need_insert && udata->idx_info_dst->layout->storage.u.chunk.ops->insert) + if ((udata->idx_info_dst->layout->storage.u.chunk.ops->insert)(udata->idx_info_dst, &udata_dst, + NULL) < 0) HGOTO_ERROR_TAG(H5E_DATASET, H5E_CANTINSERT, H5_ITER_ERROR, "unable to insert chunk addr into index"); @@ -6821,9 +6792,9 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) *------------------------------------------------------------------------- */ herr_t -H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk_t *layout_src, H5F_t *f_dst, - H5O_storage_chunk_t *storage_dst, const H5S_extent_t *ds_extent_src, H5T_t *dt_src, - const H5O_pline_t *pline_src, H5O_copy_t *cpy_info) +H5D__chunk_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst, H5O_layout_t *layout_dst, + const H5S_extent_t *ds_extent_src, H5T_t *dt_src, const H5O_pline_t *pline_src, + H5O_copy_t *cpy_info) { H5D_chunk_it_ud3_t udata; /* User data for iteration callback */ H5D_chk_idx_info_t idx_info_dst; /* Dest. 
chunked index info */ @@ -6852,12 +6823,11 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk /* Check args */ assert(f_src); - assert(storage_src); - H5D_CHUNK_STORAGE_INDEX_CHK(storage_src); assert(layout_src); + H5D_CHUNK_STORAGE_INDEX_CHK(&layout_src->storage.u.chunk); assert(f_dst); - assert(storage_dst); - H5D_CHUNK_STORAGE_INDEX_CHK(storage_dst); + assert(layout_dst); + H5D_CHUNK_STORAGE_INDEX_CHK(&layout_dst->storage.u.chunk); assert(ds_extent_src); assert(dt_src); @@ -6870,7 +6840,7 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk pline = pline_src; /* Layout is not created in the destination file, reset index address */ - if (H5D_chunk_idx_reset(storage_dst, true) < 0) + if (H5D_chunk_idx_reset(&layout_dst->storage.u.chunk, true) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to reset chunked storage index in dest"); /* Initialize layout information */ @@ -6883,23 +6853,25 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk H5_CHECKED_ASSIGN(ndims, unsigned, sndims, int); /* Set the source layout chunk information */ - if (H5D__chunk_set_info_real(layout_src, ndims, curr_dims, max_dims) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout's chunk info"); + if (H5D__chunk_set_info_real(&layout_src->u.chunk, ndims, curr_dims, max_dims) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set source layout's chunk info"); + + /* Set the destination layout chunk information */ + if (H5D__chunk_set_info_real(&layout_dst->u.chunk, ndims, curr_dims, max_dims) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set destination layout's chunk info"); } /* end block */ /* Compose source & dest chunked index info structs */ - idx_info_src.f = f_src; - idx_info_src.pline = pline; - idx_info_src.layout = layout_src; - idx_info_src.storage = storage_src; + idx_info_src.f = f_src; + idx_info_src.pline = pline; + idx_info_src.layout = layout_src; - idx_info_dst.f = f_dst; - idx_info_dst.pline = pline; /* Use same I/O filter pipeline for dest. */ - idx_info_dst.layout = layout_src /* Use same layout for dest. */; - idx_info_dst.storage = storage_dst; + idx_info_dst.f = f_dst; + idx_info_dst.pline = pline; /* Use same I/O filter pipeline for dest. 
*/ + idx_info_dst.layout = layout_dst; /* Call the index-specific "copy setup" routine */ - if ((storage_src->ops->copy_setup)(&idx_info_src, &idx_info_dst) < 0) + if ((layout_src->storage.u.chunk.ops->copy_setup)(&idx_info_src, &idx_info_dst) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up index-specific chunk copying information"); copy_setup_done = true; @@ -6942,8 +6914,8 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk /* Compute the number of elements per chunk */ nelmts = 1; - for (u = 0; u < (layout_src->ndims - 1); u++) - nelmts *= layout_src->dim[u]; + for (u = 0; u < (layout_src->u.chunk.ndims - 1); u++) + nelmts *= layout_src->u.chunk.dim[u]; /* Create the space and set the initial extent */ buf_dim = nelmts; @@ -6967,7 +6939,7 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk do_convert = true; } /* end if */ - H5_CHECKED_ASSIGN(buf_size, size_t, layout_src->size, uint32_t); + H5_CHECKED_ASSIGN(buf_size, size_t, layout_src->u.chunk.size, uint32_t); reclaim_buf_size = 0; } /* end else */ @@ -6989,8 +6961,8 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk /* Initialize the callback structure for the source */ memset(&udata, 0, sizeof udata); - udata.common.layout = layout_src; - udata.common.storage = storage_src; + udata.common.layout = &layout_src->u.chunk; + udata.common.storage = &layout_src->storage.u.chunk; udata.file_src = f_src; udata.idx_info_dst = &idx_info_dst; udata.buf = buf; @@ -7014,7 +6986,7 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk udata.chunk = NULL; /* Iterate over chunks to copy data */ - if ((storage_src->ops->iterate)(&idx_info_src, H5D__chunk_copy_cb, &udata) < 0) + if ((layout_src->storage.u.chunk.ops->iterate)(&idx_info_src, H5D__chunk_copy_cb, &udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk index to copy data"); /* Iterate over the chunk cache to copy data for chunks with undefined address */ @@ -7023,7 +6995,7 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk H5D_chunk_rec_t chunk_rec; H5D_shared_t *shared_fo = (H5D_shared_t *)udata.cpy_info->shared_fo; - chunk_rec.nbytes = layout_src->size; + chunk_rec.nbytes = layout_src->u.chunk.size; chunk_rec.filter_mask = 0; chunk_rec.chunk_addr = HADDR_UNDEF; @@ -7059,8 +7031,9 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk /* Clean up any index information */ if (copy_setup_done) - if (storage_src->ops->copy_shutdown && - (storage_src->ops->copy_shutdown)(storage_src, storage_dst) < 0) + if (layout_src->storage.u.chunk.ops->copy_shutdown && + (layout_src->storage.u.chunk.ops->copy_shutdown)(&layout_src->storage.u.chunk, + &layout_dst->storage.u.chunk) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to shut down index copying info"); FUNC_LEAVE_NOAPI(ret_value) @@ -7110,10 +7083,9 @@ H5D__chunk_bh_info(const H5O_loc_t *loc, H5O_t *oh, H5O_layout_t *layout, hsize_ memset(&pline, 0, sizeof(pline)); /* Compose chunked index info struct */ - idx_info.f = loc->file; - idx_info.pline = &pline; - idx_info.layout = &layout->u.chunk; - idx_info.storage = sc; + idx_info.f = loc->file; + idx_info.pline = &pline; + idx_info.layout = layout; /* Get the dataspace for the dataset */ if (NULL == (space = H5S_read(loc))) @@ -7216,10 +7188,9 @@ H5D__chunk_dump_index(H5D_t *dset, FILE *stream) HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, 
"unable to dump chunk index info"); /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = sc; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* Set up user data for callback */ udata.stream = stream; @@ -7455,7 +7426,6 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); assert(new_chunk); assert(need_insert); @@ -7463,16 +7433,19 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old /* Check for filters on chunks */ if (idx_info->pline->nused > 0) { - /* Sanity/error checking block */ - assert(idx_info->storage->idx_type != H5D_CHUNK_IDX_NONE); - { + /* If we are using a layout version of 4 or earlier, we must make sure the filter did not create a + * chunk that's too large to have its size encoded. Version 5 always uses 64 bits to encode the chunk + * size. Single chunk works regardless of version. */ + assert(idx_info->layout->storage.u.chunk.idx_type != H5D_CHUNK_IDX_NONE); + if (idx_info->layout->version <= H5O_LAYOUT_VERSION_4 && + idx_info->layout->storage.u.chunk.idx_type != H5D_CHUNK_IDX_SINGLE) { unsigned allow_chunk_size_len; /* Allowed size of encoded chunk size */ unsigned new_chunk_size_len; /* Size of encoded chunk size */ /* Compute the size required for encoding the size of a chunk, allowing * for an extra byte, in case the filter makes the chunk larger. */ - allow_chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)(idx_info->layout->size)) + 8) / 8); + allow_chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)(idx_info->layout->u.chunk.size)) + 8) / 8); if (allow_chunk_size_len > 8) allow_chunk_size_len = 8; @@ -7484,7 +7457,7 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old /* Check if the chunk became too large to be encoded */ if (new_chunk_size_len > allow_chunk_size_len) HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk size can't be encoded"); - } /* end block */ + } if (old_chunk && H5_addr_defined(old_chunk->offset)) { /* Sanity check */ @@ -7515,18 +7488,18 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old } /* end if */ else { assert(!H5_addr_defined(new_chunk->offset)); - assert(new_chunk->length == idx_info->layout->size); + assert(new_chunk->length == idx_info->layout->u.chunk.size); alloc_chunk = true; } /* end else */ /* Actually allocate space for the chunk in the file */ if (alloc_chunk) { - switch (idx_info->storage->idx_type) { + switch (idx_info->layout->storage.u.chunk.idx_type) { case H5D_CHUNK_IDX_NONE: { H5D_chunk_ud_t udata; udata.common.scaled = scaled; - if ((idx_info->storage->ops->get_addr)(idx_info, &udata) < 0) + if ((idx_info->layout->storage.u.chunk.ops->get_addr)(idx_info, &udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query chunk address"); new_chunk->offset = udata.chunk_block.offset; assert(new_chunk->length == udata.chunk_block.length); @@ -7589,9 +7562,9 @@ H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) chunk_addr = chunk_rec->chunk_addr; if (new_idx_info->pline->nused && - (new_idx_info->layout->flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) && - (H5D__chunk_is_partial_edge_chunk(udata->dset_ndims, new_idx_info->layout->dim, 
chunk_rec->scaled, - udata->dset_dims))) { + (new_idx_info->layout->u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) && + (H5D__chunk_is_partial_edge_chunk(udata->dset_ndims, new_idx_info->layout->u.chunk.dim, + chunk_rec->scaled, udata->dset_dims))) { /* This is a partial non-filtered edge chunk */ /* Convert the chunk to a filtered edge chunk for v1 B-tree chunk index */ @@ -7600,7 +7573,7 @@ H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) H5Z_cb_t filter_cb; /* Filter failure callback struct */ size_t read_size = nbytes; /* Bytes to read */ - assert(read_size == new_idx_info->layout->size); + assert(read_size == new_idx_info->layout->u.chunk.size); /* Initialize the filter callback struct */ filter_cb.op_data = NULL; @@ -7641,11 +7614,11 @@ H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) insert_udata.chunk_block.length = nbytes; insert_udata.filter_mask = chunk_rec->filter_mask; insert_udata.common.scaled = chunk_rec->scaled; - insert_udata.common.layout = new_idx_info->layout; - insert_udata.common.storage = new_idx_info->storage; + insert_udata.common.layout = &new_idx_info->layout->u.chunk; + insert_udata.common.storage = &new_idx_info->layout->storage.u.chunk; /* Insert chunk into the v1 B-tree chunk index */ - if ((new_idx_info->storage->ops->insert)(new_idx_info, &insert_udata, NULL) < 0) + if ((new_idx_info->layout->storage.u.chunk.ops->insert)(new_idx_info, &insert_udata, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, H5_ITER_ERROR, "unable to insert chunk addr into index"); done: @@ -7683,7 +7656,7 @@ H5D__chunk_format_convert(H5D_t *dset, H5D_chk_idx_info_t *idx_info, H5D_chk_idx /* Iterate over the chunks in the current index and insert the chunk addresses into version 1 B-tree index */ - if ((idx_info->storage->ops->iterate)(idx_info, H5D__chunk_format_convert_cb, &udata) < 0) + if ((idx_info->layout->storage.u.chunk.ops->iterate)(idx_info, H5D__chunk_format_convert_cb, &udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk index to chunk info"); done: @@ -7754,14 +7727,13 @@ H5D__chunk_index_empty(const H5D_t *dset, bool *empty) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer"); /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = &dset->shared->layout.storage.u.chunk; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; *empty = true; - if (H5_addr_defined(idx_info.storage->idx_addr)) { + if (H5_addr_defined(idx_info.layout->storage.u.chunk.idx_addr)) { /* Iterate over the allocated chunks */ if ((dset->shared->layout.storage.u.chunk.ops->iterate)(&idx_info, H5D__chunk_index_empty_cb, empty) < 0) @@ -7840,13 +7812,12 @@ H5D__get_num_chunks(const H5D_t *dset, const H5S_t H5_ATTR_UNUSED *space, hsize_ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer"); /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = &dset->shared->layout.storage.u.chunk; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* If the dataset is not written, number of chunks will be 0 */ - if 
(!H5_addr_defined(idx_info.storage->idx_addr)) + if (!H5_addr_defined(idx_info.layout->storage.u.chunk.idx_addr)) *nchunks = 0; else { /* Iterate over the allocated chunks */ @@ -7945,10 +7916,9 @@ H5D__get_chunk_info(const H5D_t *dset, const H5S_t H5_ATTR_UNUSED *space, hsize_ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer"); /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = &dset->shared->layout.storage.u.chunk; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* Set addr & size for when dset is not written or queried chunk is not found */ if (addr) @@ -7957,7 +7927,7 @@ H5D__get_chunk_info(const H5D_t *dset, const H5S_t H5_ATTR_UNUSED *space, hsize_ *size = 0; /* If the chunk is written, get its info, otherwise, return without error */ - if (H5_addr_defined(idx_info.storage->idx_addr)) { + if (H5_addr_defined(idx_info.layout->storage.u.chunk.idx_addr)) { H5D_chunk_info_iter_ud_t udata; @@ -8086,13 +8056,12 @@ H5D__get_chunk_info_by_coord(const H5D_t *dset, const hsize_t *offset, unsigned *size = 0; /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &dset->shared->layout.u.chunk; - idx_info.storage = &dset->shared->layout.storage.u.chunk; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = &dset->shared->layout; /* If the dataset is not written, return without errors */ - if (H5_addr_defined(idx_info.storage->idx_addr)) { + if (H5_addr_defined(idx_info.layout->storage.u.chunk.idx_addr)) { H5D_chunk_info_iter_ud_t udata; @@ -8206,13 +8175,12 @@ H5D__chunk_iter(H5D_t *dset, H5D_chunk_iter_op_t op, void *op_data) HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "cannot flush indexed storage buffer"); /* Compose chunked index info struct */ - idx_info.f = dset->oloc.file; - idx_info.pline = &dset->shared->dcpl_cache.pline; - idx_info.layout = &layout->u.chunk; - idx_info.storage = &layout->storage.u.chunk; + idx_info.f = dset->oloc.file; + idx_info.pline = &dset->shared->dcpl_cache.pline; + idx_info.layout = layout; /* If the dataset is not written, return without errors */ - if (H5_addr_defined(idx_info.storage->idx_addr)) { + if (H5_addr_defined(idx_info.layout->storage.u.chunk.idx_addr)) { H5D_chunk_iter_ud_t ud; /* Set up info for iteration callback */ diff --git a/src/H5Dearray.c b/src/H5Dearray.c index d2335f5a2bf..2283f2bbea7 100644 --- a/src/H5Dearray.c +++ b/src/H5Dearray.c @@ -39,7 +39,7 @@ /* Local Macros */ /****************/ -#define H5D_EARRAY_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.earray.ea) +#define H5D_EARRAY_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->layout->storage.u.chunk.u.earray.ea) /* Value to fill unset array elements with */ #define H5D_EARRAY_FILL HADDR_UNDEF @@ -48,14 +48,38 @@ HADDR_UNDEF, 0, 0 \ } +/* + * Macros to compute the size required for encoding the size of a chunk. For version 4, this is the minimum + * number of bytes required to encode the size of an unfiltered chunk plus an extra byte, in case the filter + * makes the chunk larger. For versions after 4, this is simply the size of lengths for the file. For + * unfiltered chunks, this is 0. 
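+ * For example, for a 1 MiB (2^20 byte) chunk, version 4 encodes the chunk size in 1 + ((20 + 8) / 8) = 4 bytes, while versions after 4 use H5F_SIZEOF_SIZE(f), typically 8 bytes.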
+ */ +#define H5D_EARRAY_FILT_COMPUTE_CHUNK_SIZE_LEN(chunk_size_len, f, layout) \ + do { \ + if ((layout)->version > H5O_LAYOUT_VERSION_4) \ + (chunk_size_len) = H5F_SIZEOF_SIZE(f); \ + else { \ + (chunk_size_len) = 1 + ((H5VM_log2_gen((uint64_t)(layout)->u.chunk.size) + 8) / 8); \ + if ((chunk_size_len) > 8) \ + (chunk_size_len) = 8; \ + } \ + } while (0) +#define H5D_EARRAY_COMPUTE_CHUNK_SIZE_LEN(chunk_size_len, idx_info) \ + do { \ + if ((idx_info)->pline->nused > 0) \ + H5D_EARRAY_FILT_COMPUTE_CHUNK_SIZE_LEN(chunk_size_len, (idx_info)->f, (idx_info)->layout); \ + else \ + (chunk_size_len) = 0; \ + } while (0) + /******************/ /* Local Typedefs */ /******************/ /* Extensible array create/open user data */ typedef struct H5D_earray_ctx_ud_t { - const H5F_t *f; /* Pointer to file info */ - uint32_t chunk_size; /* Size of chunk (bytes) */ + const H5F_t *f; /* Pointer to file info */ + size_t chunk_size_len; /* Size of chunk sizes in the file (bytes) */ } H5D_earray_ctx_ud_t; /* Extensible array callback context */ @@ -103,6 +127,7 @@ static herr_t H5D__earray_filt_fill(void *nat_blk, size_t nelmts); static herr_t H5D__earray_filt_encode(void *raw, const void *elmt, size_t nelmts, void *ctx); static herr_t H5D__earray_filt_decode(const void *raw, void *elmt, size_t nelmts, void *ctx); static herr_t H5D__earray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const void *elmt); +static void *H5D__earray_filt_crt_dbg_context(H5F_t *f, haddr_t obj_addr); /* Chunked layout indexing callbacks */ static herr_t H5D__earray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, @@ -182,17 +207,17 @@ const H5EA_class_t H5EA_CLS_CHUNK[1] = {{ /* Extensible array class callbacks for dataset chunks w/filters */ const H5EA_class_t H5EA_CLS_FILT_CHUNK[1] = {{ - H5EA_CLS_FILT_CHUNK_ID, /* Type of extensible array */ - "Chunk w/filters", /* Name of extensible array class */ - sizeof(H5D_earray_filt_elmt_t), /* Size of native element */ - H5D__earray_crt_context, /* Create context */ - H5D__earray_dst_context, /* Destroy context */ - H5D__earray_filt_fill, /* Fill block of missing elements callback */ - H5D__earray_filt_encode, /* Element encoding callback */ - H5D__earray_filt_decode, /* Element decoding callback */ - H5D__earray_filt_debug, /* Element debugging callback */ - H5D__earray_crt_dbg_context, /* Create debugging context */ - H5D__earray_dst_dbg_context /* Destroy debugging context */ + H5EA_CLS_FILT_CHUNK_ID, /* Type of extensible array */ + "Chunk w/filters", /* Name of extensible array class */ + sizeof(H5D_earray_filt_elmt_t), /* Size of native element */ + H5D__earray_crt_context, /* Create context */ + H5D__earray_dst_context, /* Destroy context */ + H5D__earray_filt_fill, /* Fill block of missing elements callback */ + H5D__earray_filt_encode, /* Element encoding callback */ + H5D__earray_filt_decode, /* Element decoding callback */ + H5D__earray_filt_debug, /* Element debugging callback */ + H5D__earray_filt_crt_dbg_context, /* Create debugging context */ + H5D__earray_dst_dbg_context /* Destroy debugging context */ }}; /*******************/ @@ -226,7 +251,6 @@ H5D__earray_crt_context(void *_udata) /* Sanity checks */ assert(udata); assert(udata->f); - assert(udata->chunk_size > 0); /* Allocate new context structure */ if (NULL == (ctx = H5FL_MALLOC(H5D_earray_ctx_t))) @@ -234,14 +258,8 @@ H5D__earray_crt_context(void *_udata) "can't allocate extensible array client callback context"); /* Initialize the context */ - ctx->file_addr_len = 
H5F_SIZEOF_ADDR(udata->f); - - /* Compute the size required for encoding the size of a chunk, allowing - * for an extra byte, in case the filter makes the chunk larger. - */ - ctx->chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)udata->chunk_size) + 8) / 8); - if (ctx->chunk_size_len > 8) - ctx->chunk_size_len = 8; + ctx->file_addr_len = H5F_SIZEOF_ADDR(udata->f); + ctx->chunk_size_len = udata->chunk_size_len; /* Set return value */ ret_value = ctx; @@ -567,7 +585,52 @@ H5D__earray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const *------------------------------------------------------------------------- */ static void * -H5D__earray_crt_dbg_context(H5F_t *f, haddr_t obj_addr) +H5D__earray_crt_dbg_context(H5F_t *f, haddr_t H5_ATTR_UNUSED obj_addr) +{ + H5D_earray_ctx_ud_t *dbg_ctx = NULL; /* Context for fixed array callback */ + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(f); + assert(H5_addr_defined(obj_addr)); + + /* Allocate context for debugging callback */ + if (NULL == (dbg_ctx = H5FL_MALLOC(H5D_earray_ctx_ud_t))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, + "can't allocate extensible array client callback context"); + + /* Create user data */ + dbg_ctx->f = f; + dbg_ctx->chunk_size_len = 0; + + /* Set return value */ + ret_value = dbg_ctx; + +done: + /* Cleanup on error */ + if (ret_value == NULL) + /* Release context structure */ + if (dbg_ctx) + dbg_ctx = H5FL_FREE(H5D_earray_ctx_ud_t, dbg_ctx); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_crt_dbg_context() */ + +/*------------------------------------------------------------------------- + * Function: H5D__earray_filt_crt_dbg_context + * + * Purpose: Create context for debugging callback + * (get the layout message in the specified object header) + * + * Return: Success: non-NULL + * Failure: NULL + * + *------------------------------------------------------------------------- + */ +static void * +H5D__earray_filt_crt_dbg_context(H5F_t *f, haddr_t obj_addr) { H5D_earray_ctx_ud_t *dbg_ctx = NULL; /* Context for fixed array callback */ H5O_loc_t obj_loc; /* Pointer to an object's location */ @@ -603,10 +666,13 @@ H5D__earray_crt_dbg_context(H5F_t *f, haddr_t obj_addr) /* close the object header */ if (H5O_close(&obj_loc, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header"); + obj_opened = false; /* Create user data */ - dbg_ctx->f = f; - dbg_ctx->chunk_size = layout.u.chunk.size; + dbg_ctx->f = f; + + /* Calculate length of chunk size field */ + H5D_EARRAY_FILT_COMPUTE_CHUNK_SIZE_LEN(dbg_ctx->chunk_size_len, f, &layout); /* Set return value */ ret_value = dbg_ctx; @@ -622,10 +688,12 @@ H5D__earray_crt_dbg_context(H5F_t *f, haddr_t obj_addr) if (obj_opened) if (H5O_close(&obj_loc, NULL) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header"); - } /* end if */ + } + else + assert(!obj_opened); FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__earray_crt_dbg_context() */ +} /* end H5D__earray_filt_crt_dbg_context() */ /*------------------------------------------------------------------------- * Function: H5D__earray_dst_dbg_context @@ -682,16 +750,15 @@ H5D__earray_idx_depend(const H5D_chk_idx_info_t *idx_info) assert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE); assert(idx_info->pline); assert(idx_info->layout); - assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); - 
assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(idx_info->storage->u.earray.ea); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->u.chunk.idx_type); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->storage.u.chunk.idx_type); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); + assert(idx_info->layout->storage.u.chunk.u.earray.ea); /* Set up object header location for dataset */ H5O_loc_reset(&oloc); oloc.file = idx_info->f; - oloc.addr = idx_info->storage->u.earray.dset_ohdr_addr; + oloc.addr = idx_info->layout->storage.u.chunk.u.earray.dset_ohdr_addr; /* Get header */ if (NULL == (oh = H5O_protect(&oloc, H5AC__READ_ONLY_FLAG, true))) @@ -702,7 +769,7 @@ H5D__earray_idx_depend(const H5D_chk_idx_info_t *idx_info) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get dataset object header proxy"); /* Make the extensible array a child flush dependency of the dataset's object header */ - if (H5EA_depend(idx_info->storage->u.earray.ea, oh_proxy) < 0) + if (H5EA_depend(idx_info->layout->storage.u.chunk.u.earray.ea, oh_proxy) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header proxy"); @@ -740,7 +807,6 @@ H5D__earray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, had assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); assert(space); assert(H5_addr_defined(dset_ohdr_addr)); @@ -768,10 +834,10 @@ H5D__earray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, had HGOTO_ERROR(H5E_DATASET, H5E_UNINITIALIZED, FAIL, "didn't find unlimited dimension"); /* Set the unlimited dimension for the layout's future use */ - idx_info->layout->u.earray.unlim_dim = (unsigned)unlim_dim; + idx_info->layout->u.chunk.u.earray.unlim_dim = (unsigned)unlim_dim; /* Store the dataset's object header address for later */ - idx_info->storage->u.earray.dset_ohdr_addr = dset_ohdr_addr; + idx_info->layout->storage.u.chunk.u.earray.dset_ohdr_addr = dset_ohdr_addr; done: FUNC_LEAVE_NOAPI(ret_value) @@ -795,9 +861,10 @@ H5D__earray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, had static herr_t H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info) { - H5EA_create_t cparam; /* Extensible array creation parameters */ - H5D_earray_ctx_ud_t udata; /* User data for extensible array create call */ - herr_t ret_value = SUCCEED; /* Return value */ + H5EA_create_t cparam; /* Extensible array creation parameters */ + H5D_earray_ctx_ud_t udata; /* User data for extensible array create call */ + unsigned chunk_size_len = 0; /* Size of encoded chunk size */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -806,21 +873,12 @@ H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(!H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.earray.ea); + assert(!H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); + assert(NULL == idx_info->layout->storage.u.chunk.u.earray.ea); /* General parameters */ + H5D_EARRAY_COMPUTE_CHUNK_SIZE_LEN(chunk_size_len, idx_info); if (idx_info->pline->nused > 0) { - unsigned chunk_size_len; /* Size of encoded chunk size */ - - /* Compute the size required for encoding the size of a chunk, allowing - * for an extra byte, in case the filter makes the chunk larger. 
- */ - chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)idx_info->layout->size) + 8) / 8); - if (chunk_size_len > 8) - chunk_size_len = 8; - cparam.cls = H5EA_CLS_FILT_CHUNK; cparam.raw_elmt_size = (uint8_t)(H5F_SIZEOF_ADDR(idx_info->f) + chunk_size_len + 4); } /* end if */ @@ -828,27 +886,28 @@ H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info) cparam.cls = H5EA_CLS_CHUNK; cparam.raw_elmt_size = (uint8_t)H5F_SIZEOF_ADDR(idx_info->f); } /* end else */ - cparam.max_nelmts_bits = idx_info->layout->u.earray.cparam.max_nelmts_bits; + cparam.max_nelmts_bits = idx_info->layout->u.chunk.u.earray.cparam.max_nelmts_bits; assert(cparam.max_nelmts_bits > 0); - cparam.idx_blk_elmts = idx_info->layout->u.earray.cparam.idx_blk_elmts; + cparam.idx_blk_elmts = idx_info->layout->u.chunk.u.earray.cparam.idx_blk_elmts; assert(cparam.idx_blk_elmts > 0); - cparam.sup_blk_min_data_ptrs = idx_info->layout->u.earray.cparam.sup_blk_min_data_ptrs; + cparam.sup_blk_min_data_ptrs = idx_info->layout->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs; assert(cparam.sup_blk_min_data_ptrs > 0); - cparam.data_blk_min_elmts = idx_info->layout->u.earray.cparam.data_blk_min_elmts; + cparam.data_blk_min_elmts = idx_info->layout->u.chunk.u.earray.cparam.data_blk_min_elmts; assert(cparam.data_blk_min_elmts > 0); - cparam.max_dblk_page_nelmts_bits = idx_info->layout->u.earray.cparam.max_dblk_page_nelmts_bits; + cparam.max_dblk_page_nelmts_bits = idx_info->layout->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits; assert(cparam.max_dblk_page_nelmts_bits > 0); /* Set up the user data */ - udata.f = idx_info->f; - udata.chunk_size = idx_info->layout->size; + udata.f = idx_info->f; + udata.chunk_size_len = (size_t)chunk_size_len; /* Create the extensible array for the chunk index */ - if (NULL == (idx_info->storage->u.earray.ea = H5EA_create(idx_info->f, &cparam, &udata))) + if (NULL == (idx_info->layout->storage.u.chunk.u.earray.ea = H5EA_create(idx_info->f, &cparam, &udata))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create extensible array"); /* Get the address of the extensible array in file */ - if (H5EA_get_addr(idx_info->storage->u.earray.ea, &(idx_info->storage->idx_addr)) < 0) + if (H5EA_get_addr(idx_info->layout->storage.u.chunk.u.earray.ea, + &(idx_info->layout->storage.u.chunk.idx_addr)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query extensible array address"); /* Check for SWMR writes to the file */ @@ -890,19 +949,20 @@ H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.earray.ea); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->u.chunk.idx_type); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->storage.u.chunk.idx_type); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); + assert(NULL == idx_info->layout->storage.u.chunk.u.earray.ea); /* Set up the user data */ - udata.f = idx_info->f; - udata.chunk_size = idx_info->layout->size; + udata.f = idx_info->f; + + /* Compute number of bytes used to encode the chunk size */ + H5D_EARRAY_COMPUTE_CHUNK_SIZE_LEN(udata.chunk_size_len, idx_info); /* Open the extensible array for the chunk index */ - if (NULL == - (idx_info->storage->u.earray.ea = H5EA_open(idx_info->f, idx_info->storage->idx_addr, &udata))) + 
if (NULL == (idx_info->layout->storage.u.chunk.u.earray.ea = + H5EA_open(idx_info->f, idx_info->layout->storage.u.chunk.idx_addr, &udata))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open extensible array"); /* Check for SWMR writes to the file */ @@ -933,13 +993,13 @@ H5D__earray_idx_close(const H5D_chk_idx_info_t *idx_info) FUNC_ENTER_PACKAGE assert(idx_info); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); - assert(idx_info->storage->u.earray.ea); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->storage.u.chunk.idx_type); + assert(idx_info->layout->storage.u.chunk.u.earray.ea); - if (H5EA_close(idx_info->storage->u.earray.ea) < 0) + if (H5EA_close(idx_info->layout->storage.u.chunk.u.earray.ea) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); - idx_info->storage->u.earray.ea = NULL; + idx_info->layout->storage.u.chunk.u.earray.ea = NULL; done: FUNC_LEAVE_NOAPI(ret_value) @@ -960,8 +1020,8 @@ H5D__earray_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open) FUNC_ENTER_PACKAGE_NOERR assert(idx_info); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->storage.u.chunk.idx_type); assert(is_open); *is_open = H5D_EARRAY_IDX_IS_OPEN(idx_info); @@ -1012,8 +1072,7 @@ H5D__earray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* Check if the extensible array is open yet */ @@ -1023,10 +1082,10 @@ H5D__earray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); } else /* Patch the top level file pointer contained in ea if needed */ - H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f); + H5EA_patch_file(idx_info->layout->storage.u.chunk.u.earray.ea, idx_info->f); /* Set convenience pointer to extensible array structure */ - ea = idx_info->storage->u.earray.ea; + ea = idx_info->layout->storage.u.chunk.u.earray.ea; if (!H5_addr_defined(udata->chunk_block.offset)) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "The chunk should have allocated already"); @@ -1080,8 +1139,7 @@ H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* Check if the extensible array is open yet */ @@ -1091,31 +1149,31 @@ H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); } else /* Patch the top level file pointer contained in ea if needed */ - H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f); + H5EA_patch_file(idx_info->layout->storage.u.chunk.u.earray.ea, idx_info->f); /* Set convenience pointer to extensible array structure */ - ea = idx_info->storage->u.earray.ea; + ea = idx_info->layout->storage.u.chunk.u.earray.ea; /* Check for unlimited dim. not being the slowest-changing dim. 
*/ - if (idx_info->layout->u.earray.unlim_dim > 0) { - hsize_t swizzled_coords[H5O_LAYOUT_NDIMS]; /* swizzled chunk coordinates */ - unsigned ndims = (idx_info->layout->ndims - 1); /* Number of dimensions */ + if (idx_info->layout->u.chunk.u.earray.unlim_dim > 0) { + hsize_t swizzled_coords[H5O_LAYOUT_NDIMS]; /* swizzled chunk coordinates */ + unsigned ndims = (idx_info->layout->u.chunk.ndims - 1); /* Number of dimensions */ unsigned u; /* Compute coordinate offset from scaled offset */ for (u = 0; u < ndims; u++) - swizzled_coords[u] = udata->common.scaled[u] * idx_info->layout->dim[u]; + swizzled_coords[u] = udata->common.scaled[u] * idx_info->layout->u.chunk.dim[u]; - H5VM_swizzle_coords(hsize_t, swizzled_coords, idx_info->layout->u.earray.unlim_dim); + H5VM_swizzle_coords(hsize_t, swizzled_coords, idx_info->layout->u.chunk.u.earray.unlim_dim); /* Calculate the index of this chunk */ - idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.earray.swizzled_dim, - idx_info->layout->u.earray.swizzled_max_down_chunks); + idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.chunk.u.earray.swizzled_dim, + idx_info->layout->u.chunk.u.earray.swizzled_max_down_chunks); } /* end if */ else { /* Calculate the index of this chunk */ - idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->max_down_chunks, - udata->common.scaled); + idx = H5VM_array_offset_pre((idx_info->layout->u.chunk.ndims - 1), + idx_info->layout->u.chunk.max_down_chunks, udata->common.scaled); } /* end else */ udata->chunk_idx = idx; @@ -1139,7 +1197,7 @@ H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address"); /* Update the other (constant) information for the chunk */ - udata->chunk_block.length = idx_info->layout->size; + udata->chunk_block.length = idx_info->layout->u.chunk.size; udata->filter_mask = 0; } /* end else */ @@ -1178,8 +1236,8 @@ H5D__earray_idx_load_metadata(const H5D_chk_idx_info_t *idx_info) * a chunk to cause it to be read in or created if it * doesn't exist yet. 
*/ - chunk_ud.common.layout = idx_info->layout; - chunk_ud.common.storage = idx_info->storage; + chunk_ud.common.layout = &idx_info->layout->u.chunk; + chunk_ud.common.storage = &idx_info->layout->storage.u.chunk; chunk_ud.common.scaled = scaled; chunk_ud.chunk_block.offset = HADDR_UNDEF; @@ -1322,8 +1380,7 @@ H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(chunk_cb); assert(chunk_udata); @@ -1334,10 +1391,10 @@ H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, H5_ITER_ERROR, "can't open extensible array"); } else /* Patch the top level file pointer contained in ea if needed */ - H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f); + H5EA_patch_file(idx_info->layout->storage.u.chunk.u.earray.ea, idx_info->f); /* Set convenience pointer to extensible array structure */ - ea = idx_info->storage->u.earray.ea; + ea = idx_info->layout->storage.u.chunk.u.earray.ea; /* Get the extensible array statistics */ if (H5EA_get_stats(ea, &ea_stat) < 0) @@ -1348,12 +1405,12 @@ H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t /* Initialize userdata */ memset(&udata, 0, sizeof udata); - udata.common.layout = idx_info->layout; - udata.common.storage = idx_info->storage; + udata.common.layout = &idx_info->layout->u.chunk; + udata.common.storage = &idx_info->layout->storage.u.chunk; memset(&udata.chunk_rec, 0, sizeof(udata.chunk_rec)); udata.filtered = (idx_info->pline->nused > 0); if (!udata.filtered) { - udata.chunk_rec.nbytes = idx_info->layout->size; + udata.chunk_rec.nbytes = idx_info->layout->u.chunk.size; udata.chunk_rec.filter_mask = 0; } /* end if */ udata.cb = chunk_cb; @@ -1391,8 +1448,7 @@ H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* Check if the extensible array is open yet */ @@ -1402,32 +1458,32 @@ H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); } else /* Patch the top level file pointer contained in ea if needed */ - if (H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f) < 0) + if (H5EA_patch_file(idx_info->layout->storage.u.chunk.u.earray.ea, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch earray file pointer"); /* Set convenience pointer to extensible array structure */ - ea = idx_info->storage->u.earray.ea; + ea = idx_info->layout->storage.u.chunk.u.earray.ea; /* Check for unlimited dim. not being the slowest-changing dim. 
*/ - if (idx_info->layout->u.earray.unlim_dim > 0) { - hsize_t swizzled_coords[H5O_LAYOUT_NDIMS]; /* swizzled chunk coordinates */ - unsigned ndims = (idx_info->layout->ndims - 1); /* Number of dimensions */ + if (idx_info->layout->u.chunk.u.earray.unlim_dim > 0) { + hsize_t swizzled_coords[H5O_LAYOUT_NDIMS]; /* swizzled chunk coordinates */ + unsigned ndims = (idx_info->layout->u.chunk.ndims - 1); /* Number of dimensions */ unsigned u; /* Compute coordinate offset from scaled offset */ for (u = 0; u < ndims; u++) - swizzled_coords[u] = udata->scaled[u] * idx_info->layout->dim[u]; + swizzled_coords[u] = udata->scaled[u] * idx_info->layout->u.chunk.dim[u]; - H5VM_swizzle_coords(hsize_t, swizzled_coords, idx_info->layout->u.earray.unlim_dim); + H5VM_swizzle_coords(hsize_t, swizzled_coords, idx_info->layout->u.chunk.u.earray.unlim_dim); /* Calculate the index of this chunk */ - idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.earray.swizzled_dim, - idx_info->layout->u.earray.swizzled_max_down_chunks); + idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.chunk.u.earray.swizzled_dim, + idx_info->layout->u.chunk.u.earray.swizzled_max_down_chunks); } /* end if */ else { /* Calculate the index of this chunk */ - idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->max_down_chunks, - udata->scaled); + idx = H5VM_array_offset_pre((idx_info->layout->u.chunk.ndims - 1), + idx_info->layout->u.chunk.max_down_chunks, udata->scaled); } /* end else */ /* Check for filters on chunks */ @@ -1463,8 +1519,8 @@ H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t /* Remove raw data chunk from file if not doing SWMR writes */ assert(H5_addr_defined(addr)); if (!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) { - H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */ uint32_t, /*To: */ hsize_t); - if (H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, addr, (hsize_t)idx_info->layout->size) < 0) + H5_CHECK_OVERFLOW(idx_info->layout->u.chunk.size, /*From: */ uint32_t, /*To: */ hsize_t); + if (H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, addr, (hsize_t)idx_info->layout->u.chunk.size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk"); } /* end if */ @@ -1538,10 +1594,9 @@ H5D__earray_idx_delete(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); /* Check if the index data structure has been allocated */ - if (H5_addr_defined(idx_info->storage->idx_addr)) { + if (H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)) { H5D_earray_ctx_ud_t ctx_udata; /* User data for extensible array open call */ /* Iterate over the chunk addresses in the extensible array, deleting each chunk */ @@ -1553,16 +1608,18 @@ H5D__earray_idx_delete(const H5D_chk_idx_info_t *idx_info) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); /* Set up the context user data */ - ctx_udata.f = idx_info->f; - ctx_udata.chunk_size = idx_info->layout->size; + ctx_udata.f = idx_info->f; + + /* Compute number of bytes used to encode the chunk size */ + H5D_EARRAY_COMPUTE_CHUNK_SIZE_LEN(ctx_udata.chunk_size_len, idx_info); /* Delete extensible array */ - if (H5EA_delete(idx_info->f, idx_info->storage->idx_addr, &ctx_udata) < 0) + if (H5EA_delete(idx_info->f, idx_info->layout->storage.u.chunk.idx_addr, &ctx_udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to delete chunk extensible array"); - idx_info->storage->idx_addr = 
HADDR_UNDEF; + idx_info->layout->storage.u.chunk.idx_addr = HADDR_UNDEF; } /* end if */ else - assert(NULL == idx_info->storage->u.earray.ea); + assert(NULL == idx_info->layout->storage.u.chunk.u.earray.ea); done: FUNC_LEAVE_NOAPI(ret_value) @@ -1589,13 +1646,11 @@ H5D__earray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk assert(idx_info_src->f); assert(idx_info_src->pline); assert(idx_info_src->layout); - assert(idx_info_src->storage); assert(idx_info_dst); assert(idx_info_dst->f); assert(idx_info_dst->pline); assert(idx_info_dst->layout); - assert(idx_info_dst->storage); - assert(!H5_addr_defined(idx_info_dst->storage->idx_addr)); + assert(!H5_addr_defined(idx_info_dst->layout->storage.u.chunk.idx_addr)); /* Check if the source extensible array is open yet */ if (!H5D_EARRAY_IDX_IS_OPEN(idx_info_src)) @@ -1609,7 +1664,7 @@ H5D__earray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk /* Create the extensible array that describes chunked storage in the dest. file */ if (H5D__earray_idx_create(idx_info_dst) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage"); - assert(H5_addr_defined(idx_info_dst->storage->idx_addr)); + assert(H5_addr_defined(idx_info_dst->layout->storage.u.chunk.idx_addr)); /* Reset metadata tag */ H5_END_TAG @@ -1676,8 +1731,7 @@ H5D__earray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(index_size); /* Open the extensible array in file */ @@ -1685,7 +1739,7 @@ H5D__earray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); /* Set convenience pointer to extensible array structure */ - ea = idx_info->storage->u.earray.ea; + ea = idx_info->layout->storage.u.chunk.u.earray.ea; /* Get the extensible array statistics */ if (H5EA_get_stats(ea, &ea_stat) < 0) @@ -1696,7 +1750,7 @@ H5D__earray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) ea_stat.stored.super_blk_size + ea_stat.stored.data_blk_size; done: - if (idx_info->storage->u.earray.ea) { + if (idx_info->layout->storage.u.chunk.u.earray.ea) { if (H5D__earray_idx_close(idx_info) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); } /* end if */ @@ -1773,12 +1827,12 @@ H5D__earray_idx_dest(const H5D_chk_idx_info_t *idx_info) /* Check args */ assert(idx_info); assert(idx_info->f); - assert(idx_info->storage); + assert(idx_info->layout); /* Check if the extensible array is open */ if (H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Patch the top level file pointer contained in ea if needed */ - if (H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f) < 0) + if (H5EA_patch_file(idx_info->layout->storage.u.chunk.u.earray.ea, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch earray file pointer"); /* Close extensible array */ diff --git a/src/H5Dfarray.c b/src/H5Dfarray.c index f8770322cde..1f83367e08c 100644 --- a/src/H5Dfarray.c +++ b/src/H5Dfarray.c @@ -37,7 +37,7 @@ /* Local Macros */ /****************/ -#define H5D_FARRAY_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.btree2.bt2) +#define H5D_FARRAY_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->layout->storage.u.chunk.u.btree2.bt2) /* Value to fill unset array 
elements with */ #define H5D_FARRAY_FILL HADDR_UNDEF @@ -46,14 +46,38 @@ HADDR_UNDEF, 0, 0 \ } +/* + * Macros to compute the size required for encoding the size of a chunk. For version 4, this is the minimum + * number of bytes required to encode the size of an unfiltered chunk plus an extra byte, in case the filter + * makes the chunk larger. For versions after 4, this is simply the size of lengths for the file. For + * unfiltered chunks, this is 0. + */ +#define H5D_FARRAY_FILT_COMPUTE_CHUNK_SIZE_LEN(chunk_size_len, f, layout) \ + do { \ + if ((layout)->version > H5O_LAYOUT_VERSION_4) \ + (chunk_size_len) = H5F_SIZEOF_SIZE(f); \ + else { \ + (chunk_size_len) = 1 + ((H5VM_log2_gen((uint64_t)(layout)->u.chunk.size) + 8) / 8); \ + if ((chunk_size_len) > 8) \ + (chunk_size_len) = 8; \ + } \ + } while (0) +#define H5D_FARRAY_COMPUTE_CHUNK_SIZE_LEN(chunk_size_len, idx_info) \ + do { \ + if ((idx_info)->pline->nused > 0) \ + H5D_FARRAY_FILT_COMPUTE_CHUNK_SIZE_LEN(chunk_size_len, (idx_info)->f, (idx_info)->layout); \ + else \ + (chunk_size_len) = 0; \ + } while (0) + /******************/ /* Local Typedefs */ /******************/ /* Fixed array create/open user data */ typedef struct H5D_farray_ctx_ud_t { - const H5F_t *f; /* Pointer to file info */ - uint32_t chunk_size; /* Size of chunk (bytes) */ + const H5F_t *f; /* Pointer to file info */ + size_t chunk_size_len; /* Size of chunk sizes in the file (bytes) */ } H5D_farray_ctx_ud_t; /* Fixed array callback context */ @@ -102,6 +126,7 @@ static herr_t H5D__farray_filt_fill(void *nat_blk, size_t nelmts); static herr_t H5D__farray_filt_encode(void *raw, const void *elmt, size_t nelmts, void *ctx); static herr_t H5D__farray_filt_decode(const void *raw, void *elmt, size_t nelmts, void *ctx); static herr_t H5D__farray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const void *elmt); +static void *H5D__farray_filt_crt_dbg_context(H5F_t *f, haddr_t obj_addr); /* Chunked layout indexing callbacks */ static herr_t H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, @@ -184,17 +209,17 @@ const H5FA_class_t H5FA_CLS_CHUNK[1] = {{ /* Fixed array class callbacks for dataset chunks w/filters */ const H5FA_class_t H5FA_CLS_FILT_CHUNK[1] = {{ - H5FA_CLS_FILT_CHUNK_ID, /* Type of fixed array */ - "Chunk w/filters", /* Name of fixed array class */ - sizeof(H5D_farray_filt_elmt_t), /* Size of native element */ - H5D__farray_crt_context, /* Create context */ - H5D__farray_dst_context, /* Destroy context */ - H5D__farray_filt_fill, /* Fill block of missing elements callback */ - H5D__farray_filt_encode, /* Element encoding callback */ - H5D__farray_filt_decode, /* Element decoding callback */ - H5D__farray_filt_debug, /* Element debugging callback */ - H5D__farray_crt_dbg_context, /* Create debugging context */ - H5D__farray_dst_dbg_context /* Destroy debugging context */ + H5FA_CLS_FILT_CHUNK_ID, /* Type of fixed array */ + "Chunk w/filters", /* Name of fixed array class */ + sizeof(H5D_farray_filt_elmt_t), /* Size of native element */ + H5D__farray_crt_context, /* Create context */ + H5D__farray_dst_context, /* Destroy context */ + H5D__farray_filt_fill, /* Fill block of missing elements callback */ + H5D__farray_filt_encode, /* Element encoding callback */ + H5D__farray_filt_decode, /* Element decoding callback */ + H5D__farray_filt_debug, /* Element debugging callback */ + H5D__farray_filt_crt_dbg_context, /* Create debugging context */ + H5D__farray_dst_dbg_context /* Destroy debugging context */ }}; /* Declare a free 
list to manage the H5D_farray_ctx_t struct */ @@ -225,21 +250,14 @@ H5D__farray_crt_context(void *_udata) /* Sanity checks */ assert(udata); assert(udata->f); - assert(udata->chunk_size > 0); /* Allocate new context structure */ if (NULL == (ctx = H5FL_MALLOC(H5D_farray_ctx_t))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate fixed array client callback context"); /* Initialize the context */ - ctx->file_addr_len = H5F_SIZEOF_ADDR(udata->f); - - /* Compute the size required for encoding the size of a chunk, allowing - * for an extra byte, in case the filter makes the chunk larger. - */ - ctx->chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)udata->chunk_size) + 8) / 8); - if (ctx->chunk_size_len > 8) - ctx->chunk_size_len = 8; + ctx->file_addr_len = H5F_SIZEOF_ADDR(udata->f); + ctx->chunk_size_len = udata->chunk_size_len; /* Set return value */ ret_value = ctx; @@ -420,13 +438,10 @@ H5D__farray_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const void *------------------------------------------------------------------------- */ static void * -H5D__farray_crt_dbg_context(H5F_t *f, haddr_t obj_addr) +H5D__farray_crt_dbg_context(H5F_t *f, haddr_t H5_ATTR_UNUSED obj_addr) { - H5D_farray_ctx_ud_t *dbg_ctx = NULL; /* Context for fixed array callback */ - H5O_loc_t obj_loc; /* Pointer to an object's location */ - bool obj_opened = false; /* Flag to indicate that the object header was opened */ - H5O_layout_t layout; /* Layout message */ - void *ret_value = NULL; /* Return value */ + H5D_farray_ctx_ud_t *dbg_ctx = NULL; /* Context for fixed array callback */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE @@ -438,44 +453,20 @@ H5D__farray_crt_dbg_context(H5F_t *f, haddr_t obj_addr) if (NULL == (dbg_ctx = H5FL_MALLOC(H5D_farray_ctx_ud_t))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate fixed array client callback context"); - /* Set up the object header location info */ - H5O_loc_reset(&obj_loc); - obj_loc.file = f; - obj_loc.addr = obj_addr; - - /* Open the object header where the layout message resides */ - if (H5O_open(&obj_loc) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "can't open object header"); - obj_opened = true; - - /* Read the layout message */ - if (NULL == H5O_msg_read(&obj_loc, H5O_LAYOUT_ID, &layout)) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get layout info"); - - /* close the object header */ - if (H5O_close(&obj_loc, NULL) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header"); - /* Create user data */ - dbg_ctx->f = f; - dbg_ctx->chunk_size = layout.u.chunk.size; + dbg_ctx->f = f; + dbg_ctx->chunk_size_len = 0; /* Set return value */ ret_value = dbg_ctx; done: /* Cleanup on error */ - if (ret_value == NULL) { + if (ret_value == NULL) /* Release context structure */ if (dbg_ctx) dbg_ctx = H5FL_FREE(H5D_farray_ctx_ud_t, dbg_ctx); - /* Close object header */ - if (obj_opened) - if (H5O_close(&obj_loc, NULL) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header"); - } /* end if */ - FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__farray_crt_dbg_context() */ @@ -651,6 +642,82 @@ H5D__farray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__farray_filt_debug() */ +/*------------------------------------------------------------------------- + * Function: H5D__farray_filt_crt_dbg_context + * + * Purpose: Create context for debugging callback + * (get the layout message in the specified object header) + * 
+ * Return: Success: non-NULL + * Failure: NULL + * + *------------------------------------------------------------------------- + */ +static void * +H5D__farray_filt_crt_dbg_context(H5F_t *f, haddr_t obj_addr) +{ + H5D_farray_ctx_ud_t *dbg_ctx = NULL; /* Context for fixed array callback */ + H5O_loc_t obj_loc; /* Pointer to an object's location */ + bool obj_opened = false; /* Flag to indicate that the object header was opened */ + H5O_layout_t layout; /* Layout message */ + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(f); + assert(H5_addr_defined(obj_addr)); + + /* Allocate context for debugging callback */ + if (NULL == (dbg_ctx = H5FL_MALLOC(H5D_farray_ctx_ud_t))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate fixed array client callback context"); + + /* Set up the object header location info */ + H5O_loc_reset(&obj_loc); + obj_loc.file = f; + obj_loc.addr = obj_addr; + + /* Open the object header where the layout message resides */ + if (H5O_open(&obj_loc) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "can't open object header"); + obj_opened = true; + + /* Read the layout message */ + if (NULL == H5O_msg_read(&obj_loc, H5O_LAYOUT_ID, &layout)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get layout info"); + + /* close the object header */ + if (H5O_close(&obj_loc, NULL) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header"); + obj_opened = false; + + /* Create user data */ + dbg_ctx->f = f; + + /* Calculate length of chunk size field */ + H5D_FARRAY_FILT_COMPUTE_CHUNK_SIZE_LEN(dbg_ctx->chunk_size_len, f, &layout); + + /* Set return value */ + ret_value = dbg_ctx; + +done: + /* Cleanup on error */ + if (ret_value == NULL) { + /* Release context structure */ + if (dbg_ctx) + dbg_ctx = H5FL_FREE(H5D_farray_ctx_ud_t, dbg_ctx); + + /* Close object header */ + if (obj_opened) + if (H5O_close(&obj_loc, NULL) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header"); + } + else + assert(!obj_opened); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_filt_crt_dbg_context() */ + /*------------------------------------------------------------------------- * Function: H5D__farray_idx_depend * @@ -678,16 +745,15 @@ H5D__farray_idx_depend(const H5D_chk_idx_info_t *idx_info) assert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE); assert(idx_info->pline); assert(idx_info->layout); - assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(idx_info->storage->u.farray.fa); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->u.chunk.idx_type); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->storage.u.chunk.idx_type); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); + assert(idx_info->layout->storage.u.chunk.u.farray.fa); /* Set up object header location for dataset */ H5O_loc_reset(&oloc); oloc.file = idx_info->f; - oloc.addr = idx_info->storage->u.farray.dset_ohdr_addr; + oloc.addr = idx_info->layout->storage.u.chunk.u.farray.dset_ohdr_addr; /* Get header */ if (NULL == (oh = H5O_protect(&oloc, H5AC__READ_ONLY_FLAG, true))) @@ -698,7 +764,7 @@ H5D__farray_idx_depend(const H5D_chk_idx_info_t *idx_info) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get dataset object header proxy"); /* Make the fixed array a child flush dependency of the dataset's object header proxy 
*/ - if (H5FA_depend(idx_info->storage->u.farray.fa, oh_proxy) < 0) + if (H5FA_depend(idx_info->layout->storage.u.chunk.u.farray.fa, oh_proxy) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header proxy"); @@ -727,10 +793,10 @@ H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNU /* Check args */ assert(idx_info); - assert(idx_info->storage); + assert(idx_info->layout); assert(H5_addr_defined(dset_ohdr_addr)); - idx_info->storage->u.farray.dset_ohdr_addr = dset_ohdr_addr; + idx_info->layout->storage.u.chunk.u.farray.dset_ohdr_addr = dset_ohdr_addr; FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__farray_idx_init() */ @@ -753,9 +819,10 @@ H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNU static herr_t H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info) { - H5FA_create_t cparam; /* Fixed array creation parameters */ - H5D_farray_ctx_ud_t udata; /* User data for fixed array create call */ - herr_t ret_value = SUCCEED; /* Return value */ + H5FA_create_t cparam; /* Fixed array creation parameters */ + H5D_farray_ctx_ud_t udata; /* User data for fixed array create call */ + unsigned chunk_size_len = 0; /* Size of encoded chunk size */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -764,22 +831,13 @@ H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(!H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.farray.fa); - assert(idx_info->layout->nchunks); + assert(!H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); + assert(NULL == idx_info->layout->storage.u.chunk.u.farray.fa); + assert(idx_info->layout->u.chunk.nchunks); /* General parameters */ + H5D_FARRAY_COMPUTE_CHUNK_SIZE_LEN(chunk_size_len, idx_info); if (idx_info->pline->nused > 0) { - unsigned chunk_size_len; /* Size of encoded chunk size */ - - /* Compute the size required for encoding the size of a chunk, allowing - * for an extra byte, in case the filter makes the chunk larger. 
- */ - chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)idx_info->layout->size) + 8) / 8); - if (chunk_size_len > 8) - chunk_size_len = 8; - cparam.cls = H5FA_CLS_FILT_CHUNK; cparam.raw_elmt_size = (uint8_t)(H5F_SIZEOF_ADDR(idx_info->f) + chunk_size_len + 4); } /* end if */ @@ -787,20 +845,21 @@ H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info) cparam.cls = H5FA_CLS_CHUNK; cparam.raw_elmt_size = (uint8_t)H5F_SIZEOF_ADDR(idx_info->f); } /* end else */ - cparam.max_dblk_page_nelmts_bits = idx_info->layout->u.farray.cparam.max_dblk_page_nelmts_bits; + cparam.max_dblk_page_nelmts_bits = idx_info->layout->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits; assert(cparam.max_dblk_page_nelmts_bits > 0); - cparam.nelmts = idx_info->layout->max_nchunks; + cparam.nelmts = idx_info->layout->u.chunk.max_nchunks; /* Set up the user data */ - udata.f = idx_info->f; - udata.chunk_size = idx_info->layout->size; + udata.f = idx_info->f; + udata.chunk_size_len = (size_t)chunk_size_len; /* Create the fixed array for the chunk index */ - if (NULL == (idx_info->storage->u.farray.fa = H5FA_create(idx_info->f, &cparam, &udata))) + if (NULL == (idx_info->layout->storage.u.chunk.u.farray.fa = H5FA_create(idx_info->f, &cparam, &udata))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create fixed array"); /* Get the address of the fixed array in file */ - if (H5FA_get_addr(idx_info->storage->u.farray.fa, &(idx_info->storage->idx_addr)) < 0) + if (H5FA_get_addr(idx_info->layout->storage.u.chunk.u.farray.fa, + &(idx_info->layout->storage.u.chunk.idx_addr)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query fixed array address"); /* Check for SWMR writes to the file */ @@ -837,19 +896,20 @@ H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.farray.fa); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->u.chunk.idx_type); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->storage.u.chunk.idx_type); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); + assert(NULL == idx_info->layout->storage.u.chunk.u.farray.fa); /* Set up the user data */ - udata.f = idx_info->f; - udata.chunk_size = idx_info->layout->size; + udata.f = idx_info->f; + + /* Compute number of bytes used to encode the chunk size */ + H5D_FARRAY_COMPUTE_CHUNK_SIZE_LEN(udata.chunk_size_len, idx_info); /* Open the fixed array for the chunk index */ - if (NULL == - (idx_info->storage->u.farray.fa = H5FA_open(idx_info->f, idx_info->storage->idx_addr, &udata))) + if (NULL == (idx_info->layout->storage.u.chunk.u.farray.fa = + H5FA_open(idx_info->f, idx_info->layout->storage.u.chunk.idx_addr, &udata))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open fixed array"); /* Check for SWMR writes to the file */ @@ -880,13 +940,13 @@ H5D__farray_idx_close(const H5D_chk_idx_info_t *idx_info) FUNC_ENTER_PACKAGE assert(idx_info); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); - assert(idx_info->storage->u.farray.fa); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->storage.u.chunk.idx_type); + assert(idx_info->layout->storage.u.chunk.u.farray.fa); - if (H5FA_close(idx_info->storage->u.farray.fa) < 0) + if 
(H5FA_close(idx_info->layout->storage.u.chunk.u.farray.fa) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); - idx_info->storage->u.farray.fa = NULL; + idx_info->layout->storage.u.chunk.u.farray.fa = NULL; done: FUNC_LEAVE_NOAPI(ret_value) @@ -907,8 +967,8 @@ H5D__farray_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open) FUNC_ENTER_PACKAGE_NOERR assert(idx_info); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->storage.u.chunk.idx_type); assert(is_open); *is_open = H5D_FARRAY_IDX_IS_OPEN(idx_info); @@ -959,8 +1019,7 @@ H5D__farray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* Check if the fixed array is open yet */ @@ -970,10 +1029,10 @@ H5D__farray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); } else /* Patch the top level file pointer contained in fa if needed */ - H5FA_patch_file(idx_info->storage->u.farray.fa, idx_info->f); + H5FA_patch_file(idx_info->layout->storage.u.chunk.u.farray.fa, idx_info->f); /* Set convenience pointer to fixed array structure */ - fa = idx_info->storage->u.farray.fa; + fa = idx_info->layout->storage.u.chunk.u.farray.fa; if (!H5_addr_defined(udata->chunk_block.offset)) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "The chunk should have allocated already"); @@ -1027,8 +1086,7 @@ H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* Check if the fixed array is open yet */ @@ -1038,14 +1096,14 @@ H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); } else /* Patch the top level file pointer contained in fa if needed */ - H5FA_patch_file(idx_info->storage->u.farray.fa, idx_info->f); + H5FA_patch_file(idx_info->layout->storage.u.chunk.u.farray.fa, idx_info->f); /* Set convenience pointer to fixed array structure */ - fa = idx_info->storage->u.farray.fa; + fa = idx_info->layout->storage.u.chunk.u.farray.fa; /* Calculate the index of this chunk */ - idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->max_down_chunks, - udata->common.scaled); + idx = H5VM_array_offset_pre((idx_info->layout->u.chunk.ndims - 1), + idx_info->layout->u.chunk.max_down_chunks, udata->common.scaled); udata->chunk_idx = idx; @@ -1068,7 +1126,7 @@ H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address"); /* Update the other (constant) information for the chunk */ - udata->chunk_block.length = idx_info->layout->size; + udata->chunk_block.length = idx_info->layout->u.chunk.size; udata->filter_mask = 0; } /* end else */ @@ -1106,8 +1164,8 @@ H5D__farray_idx_load_metadata(const H5D_chk_idx_info_t *idx_info) * explicitly, perform a fake lookup of a chunk to cause * it to be read in. 
*/ - chunk_ud.common.layout = idx_info->layout; - chunk_ud.common.storage = idx_info->storage; + chunk_ud.common.layout = &idx_info->layout->u.chunk; + chunk_ud.common.storage = &idx_info->layout->storage.u.chunk; chunk_ud.common.scaled = scaled; chunk_ud.chunk_block.offset = HADDR_UNDEF; @@ -1203,8 +1261,7 @@ H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(chunk_cb); assert(chunk_udata); @@ -1215,10 +1272,10 @@ H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); } else /* Patch the top level file pointer contained in fa if needed */ - H5FA_patch_file(idx_info->storage->u.farray.fa, idx_info->f); + H5FA_patch_file(idx_info->layout->storage.u.chunk.u.farray.fa, idx_info->f); /* Set convenience pointer to fixed array structure */ - fa = idx_info->storage->u.farray.fa; + fa = idx_info->layout->storage.u.chunk.u.farray.fa; /* Get the fixed array statistics */ if (H5FA_get_stats(fa, &fa_stat) < 0) @@ -1230,12 +1287,12 @@ H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t /* Initialize userdata */ memset(&udata, 0, sizeof udata); - udata.common.layout = idx_info->layout; - udata.common.storage = idx_info->storage; + udata.common.layout = &idx_info->layout->u.chunk; + udata.common.storage = &idx_info->layout->storage.u.chunk; memset(&udata.chunk_rec, 0, sizeof(udata.chunk_rec)); udata.filtered = (idx_info->pline->nused > 0); if (!udata.filtered) { - udata.chunk_rec.nbytes = idx_info->layout->size; + udata.chunk_rec.nbytes = idx_info->layout->u.chunk.size; udata.chunk_rec.filter_mask = 0; } /* end if */ udata.cb = chunk_cb; @@ -1273,8 +1330,7 @@ H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(udata); /* Check if the fixed array is open yet */ @@ -1284,15 +1340,15 @@ H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); } else /* Patch the top level file pointer contained in fa if needed */ - if (H5FA_patch_file(idx_info->storage->u.farray.fa, idx_info->f) < 0) + if (H5FA_patch_file(idx_info->layout->storage.u.chunk.u.farray.fa, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch fixed array file pointer"); /* Set convenience pointer to fixed array structure */ - fa = idx_info->storage->u.farray.fa; + fa = idx_info->layout->storage.u.chunk.u.farray.fa; /* Calculate the index of this chunk */ - idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->max_down_chunks, - udata->scaled); + idx = H5VM_array_offset_pre((idx_info->layout->u.chunk.ndims - 1), + idx_info->layout->u.chunk.max_down_chunks, udata->scaled); /* Check for filters on chunks */ if (idx_info->pline->nused > 0) { @@ -1327,8 +1383,8 @@ H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t /* Remove raw data chunk from file if not doing SWMR writes */ assert(H5_addr_defined(addr)); if 
(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) { - H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */ uint32_t, /*To: */ hsize_t); - if (H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, addr, (hsize_t)idx_info->layout->size) < 0) + H5_CHECK_OVERFLOW(idx_info->layout->u.chunk.size, /*From: */ uint32_t, /*To: */ hsize_t); + if (H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, addr, (hsize_t)idx_info->layout->u.chunk.size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk"); } /* end if */ @@ -1398,10 +1454,9 @@ H5D__farray_idx_delete(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); /* Check if the index data structure has been allocated */ - if (H5_addr_defined(idx_info->storage->idx_addr)) { + if (H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)) { H5D_farray_ctx_ud_t ctx_udata; /* User data for fixed array open call */ /* Iterate over the chunk addresses in the fixed array, deleting each chunk */ @@ -1413,16 +1468,18 @@ H5D__farray_idx_delete(const H5D_chk_idx_info_t *idx_info) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); /* Set up the user data */ - ctx_udata.f = idx_info->f; - ctx_udata.chunk_size = idx_info->layout->size; + ctx_udata.f = idx_info->f; + + /* Compute number of bytes used to encode the chunk size */ + H5D_FARRAY_COMPUTE_CHUNK_SIZE_LEN(ctx_udata.chunk_size_len, idx_info); /* Delete fixed array */ - if (H5FA_delete(idx_info->f, idx_info->storage->idx_addr, &ctx_udata) < 0) + if (H5FA_delete(idx_info->f, idx_info->layout->storage.u.chunk.idx_addr, &ctx_udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to delete chunk fixed array"); - idx_info->storage->idx_addr = HADDR_UNDEF; + idx_info->layout->storage.u.chunk.idx_addr = HADDR_UNDEF; } /* end if */ else - assert(NULL == idx_info->storage->u.farray.fa); + assert(NULL == idx_info->layout->storage.u.chunk.u.farray.fa); done: FUNC_LEAVE_NOAPI(ret_value) @@ -1449,13 +1506,11 @@ H5D__farray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk assert(idx_info_src->f); assert(idx_info_src->pline); assert(idx_info_src->layout); - assert(idx_info_src->storage); assert(idx_info_dst); assert(idx_info_dst->f); assert(idx_info_dst->pline); assert(idx_info_dst->layout); - assert(idx_info_dst->storage); - assert(!H5_addr_defined(idx_info_dst->storage->idx_addr)); + assert(!H5_addr_defined(idx_info_dst->layout->storage.u.chunk.idx_addr)); /* Check if the source fixed array is open yet */ if (!H5D_FARRAY_IDX_IS_OPEN(idx_info_src)) { @@ -1470,7 +1525,7 @@ H5D__farray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk /* Create the fixed array that describes chunked storage in the dest. 
file */ if (H5D__farray_idx_create(idx_info_dst) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage"); - assert(H5_addr_defined(idx_info_dst->storage->idx_addr)); + assert(H5_addr_defined(idx_info_dst->layout->storage.u.chunk.idx_addr)); /* Reset metadata tag */ H5_END_TAG @@ -1537,8 +1592,7 @@ H5D__farray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); assert(index_size); /* Open the fixed array in file */ @@ -1546,7 +1600,7 @@ H5D__farray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); /* Set convenience pointer to fixed array structure */ - fa = idx_info->storage->u.farray.fa; + fa = idx_info->layout->storage.u.chunk.u.farray.fa; /* Get the fixed array statistics */ if (H5FA_get_stats(fa, &fa_stat) < 0) @@ -1556,7 +1610,7 @@ H5D__farray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) *index_size += fa_stat.dblk_size; done: - if (idx_info->storage->u.farray.fa) { + if (idx_info->layout->storage.u.chunk.u.farray.fa) { if (H5D__farray_idx_close(idx_info) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); } /* end if */ @@ -1631,12 +1685,12 @@ H5D__farray_idx_dest(const H5D_chk_idx_info_t *idx_info) /* Check args */ assert(idx_info); assert(idx_info->f); - assert(idx_info->storage); + assert(idx_info->layout); /* Check if the fixed array is open */ if (H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Patch the top level file pointer contained in fa if needed */ - if (H5FA_patch_file(idx_info->storage->u.farray.fa, idx_info->f) < 0) + if (H5FA_patch_file(idx_info->layout->storage.u.chunk.u.farray.fa, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch fixed array file pointer"); /* Close fixed array */ diff --git a/src/H5Dint.c b/src/H5Dint.c index 32160a11949..0bcd9249586 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -1332,8 +1332,7 @@ H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id, hid_t if (new_dset->shared->layout.version >= H5O_LAYOUT_VERSION_4) { /* Use latest indexing type for layout message version >= 4 */ - if (H5D__layout_set_latest_indexing(&new_dset->shared->layout, new_dset->shared->space, - &new_dset->shared->dcpl_cache) < 0) + if (H5D__layout_set_latest_indexing(new_dset) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest indexing"); } /* end if */ @@ -3376,10 +3375,9 @@ H5D__format_convert(H5D_t *dataset) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate buffer"); /* Set up the current index info */ - idx_info.f = dataset->oloc.file; - idx_info.pline = &dataset->shared->dcpl_cache.pline; - idx_info.layout = &dataset->shared->layout.u.chunk; - idx_info.storage = &dataset->shared->layout.storage.u.chunk; + idx_info.f = dataset->oloc.file; + idx_info.pline = &dataset->shared->dcpl_cache.pline; + idx_info.layout = &dataset->shared->layout; /* Copy the current layout info to the new layout */ H5MM_memcpy(newlayout, &dataset->shared->layout, sizeof(H5O_layout_t)); @@ -3392,23 +3390,22 @@ H5D__format_convert(H5D_t *dataset) newlayout->storage.u.chunk.u.btree.shared = NULL; /* Set up the index info to version 1 B-tree */ - new_idx_info.f = dataset->oloc.file; - new_idx_info.pline = 
&dataset->shared->dcpl_cache.pline; - new_idx_info.layout = &(newlayout->u).chunk; - new_idx_info.storage = &(newlayout->storage).u.chunk; + new_idx_info.f = dataset->oloc.file; + new_idx_info.pline = &dataset->shared->dcpl_cache.pline; + new_idx_info.layout = newlayout; /* Initialize version 1 B-tree */ - if (new_idx_info.storage->ops->init && - (new_idx_info.storage->ops->init)(&new_idx_info, dataset->shared->space, dataset->oloc.addr) < - 0) + if (new_idx_info.layout->storage.u.chunk.ops->init && + (new_idx_info.layout->storage.u.chunk.ops->init)(&new_idx_info, dataset->shared->space, + dataset->oloc.addr) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize indexing information"); init_new_index = true; /* If the current chunk index exists */ - if (H5_addr_defined(idx_info.storage->idx_addr)) { + if (H5_addr_defined(idx_info.layout->storage.u.chunk.idx_addr)) { /* Create v1 B-tree chunk index */ - if ((new_idx_info.storage->ops->create)(&new_idx_info) < 0) + if ((new_idx_info.layout->storage.u.chunk.ops->create)(&new_idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create chunk index"); /* Iterate over the chunks in the current index and insert the chunk addresses @@ -3431,7 +3428,8 @@ H5D__format_convert(H5D_t *dataset) add_new_layout = true; /* Release the old (current) chunk index */ - if (idx_info.storage->ops->dest && (idx_info.storage->ops->dest)(&idx_info) < 0) + if (idx_info.layout->storage.u.chunk.ops->dest && + (idx_info.layout->storage.u.chunk.ops->dest)(&idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to release chunk index info"); /* Copy the new layout to the dataset's layout */ @@ -3474,7 +3472,7 @@ H5D__format_convert(H5D_t *dataset) /* Clean up v1 b-tree chunk index */ if (init_new_index) { - if (H5_addr_defined(new_idx_info.storage->idx_addr)) { + if (H5_addr_defined(new_idx_info.layout->storage.u.chunk.idx_addr)) { /* Check for valid address i.e. tag */ if (!H5_addr_defined(dataset->oloc.addr)) HDONE_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "address undefined"); @@ -3486,7 +3484,8 @@ H5D__format_convert(H5D_t *dataset) } /* end if */ /* Delete v1 B-tree chunk index */ - if (new_idx_info.storage->ops->dest && (new_idx_info.storage->ops->dest)(&new_idx_info) < 0) + if (new_idx_info.layout->storage.u.chunk.ops->dest && + (new_idx_info.layout->storage.u.chunk.ops->dest)(&new_idx_info) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to release chunk index info"); } /* end if */ } /* end if */ diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c index eb5b84217af..67d2732873e 100644 --- a/src/H5Dlayout.c +++ b/src/H5Dlayout.c @@ -48,7 +48,7 @@ const unsigned H5O_layout_ver_bounds[] = { H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V110 */ H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V112 */ H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V114 */ - H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V200 */ + H5O_LAYOUT_VERSION_5, /* H5F_LIBVER_V200 */ H5O_LAYOUT_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; @@ -277,7 +277,7 @@ H5D__layout_meta_size(const H5F_t *f, const H5O_layout_t *layout, bool include_c * Purpose: Set the version to encode a layout with. 
* * Return: Non-negative on success/Negative on failure - * + *------------------------------------------------------------------------- */ herr_t H5D__layout_set_version(H5F_t *f, H5O_layout_t *layout) @@ -292,6 +292,8 @@ H5D__layout_set_version(H5F_t *f, H5O_layout_t *layout) assert(f); /* Upgrade to the version indicated by the file's low bound if higher */ + /* This will be downgraded in H5D__layout_set_latest_indexing() if there is no benefit to the newer + * version */ version = MAX(layout->version, H5O_layout_ver_bounds[H5F_LOW_BOUND(f)]); /* Version bounds check */ @@ -315,16 +317,20 @@ H5D__layout_set_version(H5F_t *f, H5O_layout_t *layout) *------------------------------------------------------------------------- */ herr_t -H5D__layout_set_latest_indexing(H5O_layout_t *layout, const H5S_t *space, const H5D_dcpl_cache_t *dcpl_cache) +H5D__layout_set_latest_indexing(H5D_t *dset) { - herr_t ret_value = SUCCEED; /* Return value */ + H5O_layout_t *layout; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE /* Sanity check */ - assert(layout); - assert(space); - assert(dcpl_cache); + assert(dset); + assert(dset->shared); + assert(dset->shared->space); + + /* Set convenience pointer */ + layout = &dset->shared->layout; /* The indexing methods only apply to chunked datasets (currently) */ if (layout->type == H5D_CHUNKED) { @@ -332,7 +338,7 @@ H5D__layout_set_latest_indexing(H5O_layout_t *layout, const H5S_t *space, const unsigned ndims; /* Rank of dataspace */ /* Query the dimensionality of the dataspace */ - if ((sndims = H5S_GET_EXTENT_NDIMS(space)) < 0) + if ((sndims = H5S_GET_EXTENT_NDIMS(dset->shared->space)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "invalid dataspace rank"); ndims = (unsigned)sndims; @@ -345,7 +351,7 @@ H5D__layout_set_latest_indexing(H5O_layout_t *layout, const H5S_t *space, const unsigned u; /* Local index variable */ /* Query the dataspace's dimensions */ - if (H5S_get_simple_extent_dims(space, cur_dims, max_dims) < 0) + if (H5S_get_simple_extent_dims(dset->shared->space, cur_dims, max_dims) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace max. dimensions"); /* Spin through the max. 
dimensions, looking for unlimited dimensions */ @@ -354,7 +360,7 @@ H5D__layout_set_latest_indexing(H5O_layout_t *layout, const H5S_t *space, const unlim_count++; if (cur_dims[u] != max_dims[u] || cur_dims[u] != layout->u.chunk.dim[u]) single = false; - } /* end for */ + } /* Chunked datasets with unlimited dimension(s) */ if (unlim_count) { /* dataset with unlimited dimension(s) must be chunked */ @@ -374,7 +380,12 @@ H5D__layout_set_latest_indexing(H5O_layout_t *layout, const H5S_t *space, const layout->u.chunk.u.earray.cparam.data_blk_min_elmts = H5D_EARRAY_DATA_BLK_MIN_ELMTS; layout->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits = H5D_EARRAY_MAX_DBLOCK_PAGE_NELMTS_BITS; - } /* end if */ + + /* If there are no filters, downgrade version to 4 since version 5 doesn't improve + * anything */ + if (!dset->shared->dcpl_cache.pline.nused) + layout->version = H5O_LAYOUT_VERSION_4; + } else { /* Chunked dataset with > 1 unlimited dimensions */ /* Set the chunk index type to v2 B-tree */ layout->u.chunk.idx_type = H5D_CHUNK_IDX_BT2; @@ -388,22 +399,34 @@ H5D__layout_set_latest_indexing(H5O_layout_t *layout, const H5S_t *space, const layout->u.chunk.u.btree2.cparam.node_size = H5D_BT2_NODE_SIZE; layout->u.chunk.u.btree2.cparam.split_percent = H5D_BT2_SPLIT_PERC; layout->u.chunk.u.btree2.cparam.merge_percent = H5D_BT2_MERGE_PERC; - } /* end else */ - } /* end if */ + + /* If there are no filters, downgrade version to 4 since version 5 doesn't improve + * anything */ + if (!dset->shared->dcpl_cache.pline.nused) + layout->version = H5O_LAYOUT_VERSION_4; + } + } else { /* Chunked dataset with fixed dimensions */ /* Check for correct condition for using "single chunk" chunk index */ if (single) { layout->u.chunk.idx_type = H5D_CHUNK_IDX_SINGLE; layout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_SINGLE; layout->storage.u.chunk.ops = H5D_COPS_SINGLE; - } /* end if */ - else if (!dcpl_cache->pline.nused && dcpl_cache->fill.alloc_time == H5D_ALLOC_TIME_EARLY) { + + /* Downgrade version to 4 since version 5 doesn't improve anything */ + layout->version = H5O_LAYOUT_VERSION_4; + } + else if (!dset->shared->dcpl_cache.pline.nused && + dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_EARLY) { /* Set the chunk index type to "none" Index */ layout->u.chunk.idx_type = H5D_CHUNK_IDX_NONE; layout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_NONE; layout->storage.u.chunk.ops = H5D_COPS_NONE; - } /* end else-if */ + + /* Downgrade version to 4 since version 5 doesn't improve anything */ + layout->version = H5O_LAYOUT_VERSION_4; + } else { /* Set the chunk index type to Fixed Array */ layout->u.chunk.idx_type = H5D_CHUNK_IDX_FARRAY; @@ -416,10 +439,18 @@ H5D__layout_set_latest_indexing(H5O_layout_t *layout, const H5S_t *space, const */ layout->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits = H5D_FARRAY_MAX_DBLK_PAGE_NELMTS_BITS; - } /* end else */ - } /* end else */ - } /* end if */ - } /* end if */ + + /* If there are no filters, downgrade version to 4 since version 5 doesn't improve + * anything */ + if (!dset->shared->dcpl_cache.pline.nused) + layout->version = H5O_LAYOUT_VERSION_4; + } + } + } + else + /* Rank 0 -> v1 b-tree. 
Downgrade version to 4 since version 5 doesn't improve anything */ + layout->version = H5O_LAYOUT_VERSION_4; + } done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 000ec3280d8..ded4686b8a8 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -82,10 +82,9 @@ */ #define H5D_MPIO_INIT_CHUNK_IDX_INFO(index_info, dset) \ do { \ - (index_info).f = (dset)->oloc.file; \ - (index_info).pline = &((dset)->shared->dcpl_cache.pline); \ - (index_info).layout = &((dset)->shared->layout.u.chunk); \ - (index_info).storage = &((dset)->shared->layout.storage.u.chunk); \ + (index_info).f = (dset)->oloc.file; \ + (index_info).pline = &((dset)->shared->dcpl_cache.pline); \ + (index_info).layout = &((dset)->shared->layout); \ } while (0) /******************/ @@ -3033,17 +3032,16 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as H5D_chk_idx_info_t idx_info; bool index_is_open; - idx_info.f = di->dset->oloc.file; - idx_info.pline = &di->dset->shared->dcpl_cache.pline; - idx_info.layout = &di->dset->shared->layout.u.chunk; - idx_info.storage = &di->dset->shared->layout.storage.u.chunk; + idx_info.f = di->dset->oloc.file; + idx_info.pline = &di->dset->shared->dcpl_cache.pline; + idx_info.layout = &di->dset->shared->layout; /* * The dataset's chunk index should be open at this point. * Otherwise, we will end up reading it in independently, * which may not be desired. */ - idx_info.storage->ops->is_open(&idx_info, &index_is_open); + idx_info.layout->storage.u.chunk.ops->is_open(&idx_info, &index_is_open); assert(index_is_open); } #endif @@ -3302,7 +3300,7 @@ H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const if ((fill_msg->alloc_time != H5D_ALLOC_TIME_INCR) || !curr_dset_info->index_empty) chunk_list->all_dset_indices_empty = false; - if (curr_dset_info->chunk_idx_info.storage->ops->insert) + if (curr_dset_info->chunk_idx_info.layout->storage.u.chunk.ops->insert) chunk_list->no_dset_index_insert_methods = false; /* @@ -5316,8 +5314,8 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t * cached_dset_info = chunk_list->dset_info.single_dset_info; assert(cached_dset_info); - chunk_ud.common.layout = cached_dset_info->chunk_idx_info.layout; - chunk_ud.common.storage = cached_dset_info->chunk_idx_info.storage; + chunk_ud.common.layout = &cached_dset_info->chunk_idx_info.layout->u.chunk; + chunk_ud.common.storage = &cached_dset_info->chunk_idx_info.layout->storage.u.chunk; chunk_ud.common.scaled = scaled_coords; chunk_ud.chunk_block = coll_entry->chunk_block; @@ -5325,8 +5323,8 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t * chunk_ud.filter_mask = coll_entry->index_info.filter_mask; /* Calculate scaled coordinates for the chunk */ - if (cached_dset_info->chunk_idx_info.layout->idx_type == H5D_CHUNK_IDX_EARRAY && - cached_dset_info->chunk_idx_info.layout->u.earray.unlim_dim > 0) { + if (cached_dset_info->chunk_idx_info.layout->u.chunk.idx_type == H5D_CHUNK_IDX_EARRAY && + cached_dset_info->chunk_idx_info.layout->u.chunk.u.earray.unlim_dim > 0) { /* * Extensible arrays where the unlimited dimension is not * the slowest-changing dimension "swizzle" the coordinates @@ -5340,12 +5338,13 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t * * callback that accepts a chunk index and provides the * caller with the scaled coordinates for that chunk. 
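Taken together, the branches added to H5D__layout_set_latest_indexing() above implement one rule: keep layout version 5 only when it buys something, i.e. only for filtered datasets whose chunk index records per-chunk sizes. A condensed model of that decision, with stand-in names rather than library code, is sketched below.

#include <stdbool.h>

/* Illustrative stand-ins for the chunk index types chosen above */
enum sketch_idx { IDX_SINGLE, IDX_IMPLICIT, IDX_FARRAY, IDX_EARRAY, IDX_BT2, IDX_BTREE_V1 };

/* Version 5 only changes how filtered chunk sizes are encoded, so every other
 * combination falls back to version 4, matching the downgrades added above. */
static unsigned
sketch_pick_layout_version(unsigned requested, enum sketch_idx idx, bool has_filters)
{
    if (requested < 5)
        return requested;
    if (idx == IDX_SINGLE || idx == IDX_IMPLICIT || idx == IDX_BTREE_V1)
        return 4;                  /* these indexes never benefit from v5 */
    if (!has_filters)
        return 4;                  /* no filters: nothing to re-encode */
    return 5;                      /* filtered farray/earray/v2 B-tree keep v5 */
}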
*/ - H5VM_array_calc_pre(chunk_ud.chunk_idx, cached_dset_info->dset_io_info->dset->shared->ndims, - cached_dset_info->chunk_idx_info.layout->u.earray.swizzled_down_chunks, - scaled_coords); + H5VM_array_calc_pre( + chunk_ud.chunk_idx, cached_dset_info->dset_io_info->dset->shared->ndims, + cached_dset_info->chunk_idx_info.layout->u.chunk.u.earray.swizzled_down_chunks, + scaled_coords); H5VM_unswizzle_coords(hsize_t, scaled_coords, - cached_dset_info->chunk_idx_info.layout->u.earray.unlim_dim); + cached_dset_info->chunk_idx_info.layout->u.chunk.u.earray.unlim_dim); } else { H5VM_array_calc_pre(chunk_ud.chunk_idx, cached_dset_info->dset_io_info->dset->shared->ndims, @@ -5388,7 +5387,7 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t * /* Set metadata tagging with dataset oheader addr */ H5AC_tag(cached_dset_info->dset_io_info->dset->oloc.addr, &prev_tag); - if ((cached_dset_info->chunk_idx_info.storage->ops->insert)( + if ((cached_dset_info->chunk_idx_info.layout->storage.u.chunk.ops->insert)( &cached_dset_info->chunk_idx_info, &chunk_ud, cached_dset_info->dset_io_info->dset) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk address into index"); diff --git a/src/H5Dnone.c b/src/H5Dnone.c index 0d747c82d47..2f19e2887f2 100644 --- a/src/H5Dnone.c +++ b/src/H5Dnone.c @@ -128,12 +128,12 @@ H5D__none_idx_create(const H5D_chk_idx_info_t *idx_info) assert(idx_info->pline); assert(idx_info->pline->nused == 0); /* Shouldn't have filter defined on entering here */ assert(idx_info->layout); - assert(idx_info->storage); - assert(idx_info->layout->max_nchunks); - assert(!H5_addr_defined(idx_info->storage->idx_addr)); /* address of data shouldn't be defined */ + assert(idx_info->layout->u.chunk.max_nchunks); + assert(!H5_addr_defined( + idx_info->layout->storage.u.chunk.idx_addr)); /* address of data shouldn't be defined */ /* Calculate size of max dataset chunks */ - nbytes = idx_info->layout->max_nchunks * idx_info->layout->size; + nbytes = idx_info->layout->u.chunk.max_nchunks * idx_info->layout->u.chunk.size; /* Allocate space for max dataset chunks */ addr = H5MF_alloc(idx_info->f, H5FD_MEM_DRAW, nbytes); @@ -141,7 +141,7 @@ H5D__none_idx_create(const H5D_chk_idx_info_t *idx_info) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "file allocation failed"); /* This is the address of the dataset chunks */ - idx_info->storage->idx_addr = addr; + idx_info->layout->storage.u.chunk.idx_addr = addr; done: FUNC_LEAVE_NOAPI(ret_value) @@ -200,8 +200,8 @@ H5D__none_idx_is_open(const H5D_chk_idx_info_t H5_ATTR_NDEBUG_UNUSED *idx_info, FUNC_ENTER_PACKAGE_NOERR assert(idx_info); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_NONE == idx_info->storage->idx_type); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_NONE == idx_info->layout->storage.u.chunk.idx_type); assert(is_open); *is_open = true; @@ -250,19 +250,19 @@ H5D__none_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata assert(idx_info->pline); assert(idx_info->pline->nused == 0); assert(idx_info->layout); - assert(idx_info->storage); assert(udata); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); /* Calculate the index of this chunk */ - udata->chunk_idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->max_down_chunks, - udata->common.scaled); + udata->chunk_idx = H5VM_array_offset_pre((idx_info->layout->u.chunk.ndims - 1), + idx_info->layout->u.chunk.max_down_chunks, 
udata->common.scaled); /* Calculate the address of the chunk */ - udata->chunk_block.offset = idx_info->storage->idx_addr + udata->chunk_idx * idx_info->layout->size; + udata->chunk_block.offset = + idx_info->layout->storage.u.chunk.idx_addr + udata->chunk_idx * idx_info->layout->u.chunk.size; /* Update the other (constant) information for the chunk */ - udata->chunk_block.length = idx_info->layout->size; + udata->chunk_block.length = idx_info->layout->u.chunk.size; udata->filter_mask = 0; FUNC_LEAVE_NOAPI(SUCCEED) @@ -316,26 +316,26 @@ H5D__none_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t ch assert(idx_info->pline); assert(!idx_info->pline->nused); assert(idx_info->layout); - assert(idx_info->storage); assert(chunk_cb); assert(chunk_udata); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); /* Initialize generic chunk record */ memset(&chunk_rec, 0, sizeof(chunk_rec)); - chunk_rec.nbytes = idx_info->layout->size; + chunk_rec.nbytes = idx_info->layout->u.chunk.size; chunk_rec.filter_mask = 0; - ndims = idx_info->layout->ndims - 1; + ndims = idx_info->layout->u.chunk.ndims - 1; assert(ndims > 0); /* Iterate over all the chunks in the dataset's dataspace */ - for (u = 0; u < idx_info->layout->nchunks && ret_value == H5_ITER_CONT; u++) { + for (u = 0; u < idx_info->layout->u.chunk.nchunks && ret_value == H5_ITER_CONT; u++) { /* Calculate the index of this chunk */ - idx = H5VM_array_offset_pre(ndims, idx_info->layout->max_down_chunks, chunk_rec.scaled); + idx = H5VM_array_offset_pre(ndims, idx_info->layout->u.chunk.max_down_chunks, chunk_rec.scaled); /* Calculate the address of the chunk */ - chunk_rec.chunk_addr = idx_info->storage->idx_addr + idx * idx_info->layout->size; + chunk_rec.chunk_addr = + idx_info->layout->storage.u.chunk.idx_addr + idx * idx_info->layout->u.chunk.size; /* Make "generic chunk" callback */ if ((ret_value = (*chunk_cb)(&chunk_rec, chunk_udata)) < 0) @@ -349,7 +349,7 @@ H5D__none_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t ch chunk_rec.scaled[curr_dim]++; /* Check if we went off the end of the current dimension */ - if (chunk_rec.scaled[curr_dim] >= idx_info->layout->chunks[curr_dim]) { + if (chunk_rec.scaled[curr_dim] >= idx_info->layout->u.chunk.chunks[curr_dim]) { /* Reset coordinate & move to next faster dimension */ chunk_rec.scaled[curr_dim] = 0; curr_dim--; @@ -411,15 +411,14 @@ H5D__none_idx_delete(const H5D_chk_idx_info_t *idx_info) assert(idx_info->pline); assert(!idx_info->pline->nused); /* Shouldn't have filter defined on entering here */ assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); /* should be defined */ + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); /* should be defined */ /* chunk size * max # of chunks */ - nbytes = idx_info->layout->max_nchunks * idx_info->layout->size; - if (H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->storage->idx_addr, nbytes) < 0) + nbytes = idx_info->layout->u.chunk.max_nchunks * idx_info->layout->u.chunk.size; + if (H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->layout->storage.u.chunk.idx_addr, nbytes) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, H5_ITER_ERROR, "unable to free dataset chunks"); - idx_info->storage->idx_addr = HADDR_UNDEF; + idx_info->layout->storage.u.chunk.idx_addr = HADDR_UNDEF; done: FUNC_LEAVE_NOAPI(ret_value) @@ -448,15 +447,13 @@ H5D__none_idx_copy_setup(const H5D_chk_idx_info_t 
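For the implicit ("none") index in H5Dnone.c above, a chunk's file address is pure arithmetic: the linear chunk index — the scaled coordinates combined with the "down" chunk counts — times the fixed chunk size, added to the base address. A toy calculation with made-up numbers:

#include <stdio.h>

int main(void)
{
    /* Made-up values for a 2-D dataset laid out as 3 x 4 chunks */
    unsigned long long idx_addr   = 4096;     /* base address of the chunk block */
    unsigned long long chunk_size = 400;      /* bytes per (unfiltered) chunk */
    unsigned long long down[2]    = {4, 1};   /* chunks spanned per step in each dimension */
    unsigned long long scaled[2]  = {2, 1};   /* chunk coordinates: row 2, column 1 */

    unsigned long long chunk_idx  = scaled[0] * down[0] + scaled[1] * down[1]; /* = 9 */
    unsigned long long chunk_addr = idx_addr + chunk_idx * chunk_size;         /* = 7696 */

    printf("chunk %llu starts at file address %llu\n", chunk_idx, chunk_addr);
    return 0;
}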
H5_ATTR_NDEBUG_UNUSED *idx_inf assert(idx_info_src->pline); assert(!idx_info_src->pline->nused); assert(idx_info_src->layout); - assert(idx_info_src->storage); - assert(H5_addr_defined(idx_info_src->storage->idx_addr)); + assert(H5_addr_defined(idx_info_src->layout->storage.u.chunk.idx_addr)); assert(idx_info_dst); assert(idx_info_dst->f); assert(idx_info_dst->pline); assert(!idx_info_dst->pline->nused); assert(idx_info_dst->layout); - assert(idx_info_dst->storage); /* Set copied metadata tag */ H5_BEGIN_TAG(H5AC__COPIED_TAG) diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 6271d6c78e5..08429e5890f 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -375,10 +375,9 @@ typedef struct H5D_io_info_wrap_t { /* Typedef for chunked dataset index operation info */ typedef struct H5D_chk_idx_info_t { - H5F_t *f; /* File pointer for operation */ - const H5O_pline_t *pline; /* I/O pipeline info */ - H5O_layout_chunk_t *layout; /* Chunk layout description */ - H5O_storage_chunk_t *storage; /* Chunk storage description */ + H5F_t *f; /* File pointer for operation */ + const H5O_pline_t *pline; /* I/O pipeline info */ + H5O_layout_t *layout; /* Layout description */ } H5D_chk_idx_info_t; /* @@ -734,8 +733,7 @@ H5_DLL herr_t H5D__scatgath_write_select(H5D_io_info_t *io_info); H5_DLL herr_t H5D__layout_set_io_ops(const H5D_t *dataset); H5_DLL size_t H5D__layout_meta_size(const H5F_t *f, const H5O_layout_t *layout, bool include_compact_data); H5_DLL herr_t H5D__layout_set_version(H5F_t *f, H5O_layout_t *layout); -H5_DLL herr_t H5D__layout_set_latest_indexing(H5O_layout_t *layout, const H5S_t *space, - const H5D_dcpl_cache_t *dcpl_cache); +H5_DLL herr_t H5D__layout_set_latest_indexing(H5D_t *dset); H5_DLL herr_t H5D__layout_oh_create(H5F_t *file, H5O_t *oh, H5D_t *dset, hid_t dapl_id); H5_DLL herr_t H5D__layout_oh_read(H5D_t *dset, hid_t dapl_id, H5P_genplist_t *plist); H5_DLL herr_t H5D__layout_oh_write(const H5D_t *dataset, H5O_t *oh, unsigned update_flags); @@ -778,13 +776,12 @@ H5_DLL herr_t H5D__chunk_set_sizes(H5D_t *dset); H5_DLL herr_t H5D__chunk_addrmap(const H5D_t *dset, haddr_t chunk_addr[]); #endif /* H5_HAVE_PARALLEL */ H5_DLL herr_t H5D__chunk_update_cache(H5D_t *dset); -H5_DLL herr_t H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk_t *layout_src, - H5F_t *f_dst, H5O_storage_chunk_t *storage_dst, +H5_DLL herr_t H5D__chunk_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst, H5O_layout_t *layout_dst, const H5S_extent_t *ds_extent_src, H5T_t *dt_src, const H5O_pline_t *pline_src, H5O_copy_t *cpy_info); H5_DLL herr_t H5D__chunk_bh_info(const H5O_loc_t *loc, H5O_t *oh, H5O_layout_t *layout, hsize_t *btree_size); H5_DLL herr_t H5D__chunk_dump_index(H5D_t *dset, FILE *stream); -H5_DLL herr_t H5D__chunk_delete(H5F_t *f, H5O_t *oh, H5O_storage_t *store); +H5_DLL herr_t H5D__chunk_delete(H5F_t *f, H5O_t *oh, H5O_layout_t *layout); H5_DLL herr_t H5D__chunk_get_offset_copy(const H5D_t *dset, const hsize_t *offset, hsize_t *offset_copy); H5_DLL herr_t H5D__chunk_direct_write(H5D_t *dset, uint32_t filters, hsize_t *offset, uint32_t data_size, const void *buf); diff --git a/src/H5Dsingle.c b/src/H5Dsingle.c index fb9ffc27273..f4bda7c0030 100644 --- a/src/H5Dsingle.c +++ b/src/H5Dsingle.c @@ -122,18 +122,17 @@ H5D__single_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNU assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); if (idx_info->pline->nused) { - idx_info->layout->flags |= 
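The H5Dpkg.h change above is the heart of the refactor: H5D_chk_idx_info_t now carries only the full layout message, because that message already holds both the chunk layout parameters (u.chunk) and the chunk storage/index state (storage.u.chunk). A toy model of the nesting — simplified types, not the real HDF5 structs — shows why the separate storage pointer became redundant:

#include <stdio.h>

struct toy_layout_chunk  { unsigned ndims; unsigned size; };   /* roughly like H5O_layout_chunk_t */
struct toy_storage_chunk { unsigned long long idx_addr; };     /* roughly like H5O_storage_chunk_t */

struct toy_layout {                                            /* roughly like H5O_layout_t */
    struct { struct toy_layout_chunk chunk; } u;                       /* layout parameters */
    struct { struct { struct toy_storage_chunk chunk; } u; } storage;  /* index/storage state */
};

struct toy_idx_info {                                          /* roughly like H5D_chk_idx_info_t */
    struct toy_layout *layout;                                 /* no separate storage member */
};

int main(void)
{
    struct toy_layout   layout;
    struct toy_idx_info idx_info = {&layout};

    layout.u.chunk.ndims            = 2;
    layout.u.chunk.size             = 1024;
    layout.storage.u.chunk.idx_addr = 4096;

    /* The two access paths used throughout the patch */
    printf("ndims=%u idx_addr=%llu\n",
           idx_info.layout->u.chunk.ndims,
           idx_info.layout->storage.u.chunk.idx_addr);
    return 0;
}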
H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER; + idx_info->layout->u.chunk.flags |= H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER; - if (!H5_addr_defined(idx_info->storage->idx_addr)) { - idx_info->storage->u.single.nbytes = 0; - idx_info->storage->u.single.filter_mask = 0; + if (!H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)) { + idx_info->layout->storage.u.chunk.u.single.nbytes = 0; + idx_info->layout->storage.u.chunk.u.single.filter_mask = 0; } } else - idx_info->layout->flags = 0; + idx_info->layout->u.chunk.flags = 0; FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__single_idx_init() */ @@ -158,15 +157,14 @@ H5D__single_idx_create(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(idx_info->layout->max_nchunks == idx_info->layout->nchunks); - assert(idx_info->layout->nchunks == 1); - assert(!H5_addr_defined(idx_info->storage->idx_addr)); + assert(idx_info->layout->u.chunk.max_nchunks == idx_info->layout->u.chunk.nchunks); + assert(idx_info->layout->u.chunk.nchunks == 1); + assert(!H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); if (idx_info->pline->nused) - assert(idx_info->layout->flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER); + assert(idx_info->layout->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER); else - assert(!(idx_info->layout->flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER)); + assert(!(idx_info->layout->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER)); FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__single_idx_create() */ @@ -224,8 +222,8 @@ H5D__single_idx_is_open(const H5D_chk_idx_info_t H5_ATTR_NDEBUG_UNUSED *idx_info FUNC_ENTER_PACKAGE_NOERR assert(idx_info); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_SINGLE == idx_info->storage->idx_type); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_SINGLE == idx_info->layout->storage.u.chunk.idx_type); assert(is_open); *is_open = true; @@ -274,18 +272,18 @@ H5D__single_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(idx_info->layout->nchunks == 1); - assert(idx_info->layout->max_nchunks == 1); + assert(idx_info->layout->u.chunk.nchunks == 1); + assert(idx_info->layout->u.chunk.max_nchunks == 1); assert(udata); /* Set the address for the chunk */ assert(H5_addr_defined(udata->chunk_block.offset)); - idx_info->storage->idx_addr = udata->chunk_block.offset; + idx_info->layout->storage.u.chunk.idx_addr = udata->chunk_block.offset; if (idx_info->pline->nused > 0) { - H5_CHECKED_ASSIGN(idx_info->storage->u.single.nbytes, uint32_t, udata->chunk_block.length, hsize_t); - idx_info->storage->u.single.filter_mask = udata->filter_mask; + H5_CHECKED_ASSIGN(idx_info->layout->storage.u.chunk.u.single.nbytes, uint32_t, + udata->chunk_block.length, hsize_t); + idx_info->layout->storage.u.chunk.u.single.filter_mask = udata->filter_mask; } /* end if */ if (dset) @@ -318,18 +316,17 @@ H5D__single_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(idx_info->layout->nchunks == 1); - assert(idx_info->layout->max_nchunks == 1); + assert(idx_info->layout->u.chunk.nchunks == 1); + assert(idx_info->layout->u.chunk.max_nchunks == 1); assert(udata); - udata->chunk_block.offset = idx_info->storage->idx_addr; - if (idx_info->layout->flags & 
H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) { - udata->chunk_block.length = idx_info->storage->u.single.nbytes; - udata->filter_mask = idx_info->storage->u.single.filter_mask; + udata->chunk_block.offset = idx_info->layout->storage.u.chunk.idx_addr; + if (idx_info->layout->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) { + udata->chunk_block.length = idx_info->layout->storage.u.chunk.u.single.nbytes; + udata->filter_mask = idx_info->layout->storage.u.chunk.u.single.filter_mask; } /* end if */ else { - udata->chunk_block.length = idx_info->layout->size; + udata->chunk_block.length = idx_info->layout->u.chunk.size; udata->filter_mask = 0; } /* end else */ if (!H5_addr_defined(udata->chunk_block.offset)) @@ -380,21 +377,20 @@ H5D__single_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); assert(chunk_cb); assert(chunk_udata); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); /* Initialize generic chunk record */ memset(&chunk_rec, 0, sizeof(chunk_rec)); - chunk_rec.chunk_addr = idx_info->storage->idx_addr; + chunk_rec.chunk_addr = idx_info->layout->storage.u.chunk.idx_addr; - if (idx_info->layout->flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) { - chunk_rec.nbytes = idx_info->storage->u.single.nbytes; - chunk_rec.filter_mask = idx_info->storage->u.single.filter_mask; + if (idx_info->layout->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) { + chunk_rec.nbytes = idx_info->layout->storage.u.chunk.u.single.nbytes; + chunk_rec.filter_mask = idx_info->layout->storage.u.chunk.u.single.filter_mask; } /* end if */ else { - chunk_rec.nbytes = idx_info->layout->size; + chunk_rec.nbytes = idx_info->layout->u.chunk.size; chunk_rec.filter_mask = 0; } /* end else */ @@ -427,18 +423,17 @@ H5D__single_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); - if (idx_info->layout->flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) - nbytes = idx_info->storage->u.single.nbytes; + if (idx_info->layout->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) + nbytes = idx_info->layout->storage.u.chunk.u.single.nbytes; else - nbytes = idx_info->layout->size; + nbytes = idx_info->layout->u.chunk.size; - if (H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->storage->idx_addr, nbytes) < 0) + if (H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->layout->storage.u.chunk.idx_addr, nbytes) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, H5_ITER_ERROR, "unable to free dataset chunks"); - idx_info->storage->idx_addr = HADDR_UNDEF; + idx_info->layout->storage.u.chunk.idx_addr = HADDR_UNDEF; done: FUNC_LEAVE_NOAPI(ret_value) @@ -467,12 +462,11 @@ H5D__single_idx_delete(const H5D_chk_idx_info_t *idx_info) assert(idx_info->f); assert(idx_info->pline); assert(idx_info->layout); - assert(idx_info->storage); - if (H5_addr_defined(idx_info->storage->idx_addr)) + if (H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)) ret_value = H5D__single_idx_remove(idx_info, NULL); else - assert(!H5_addr_defined(idx_info->storage->idx_addr)); + assert(!H5_addr_defined(idx_info->layout->storage.u.chunk.idx_addr)); FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__single_idx_delete() */ @@ 
-500,14 +494,12 @@ H5D__single_idx_copy_setup(const H5D_chk_idx_info_t H5_ATTR_NDEBUG_UNUSED *idx_i assert(idx_info_src->f); assert(idx_info_src->pline); assert(idx_info_src->layout); - assert(idx_info_src->storage); - assert(H5_addr_defined(idx_info_src->storage->idx_addr)); + assert(H5_addr_defined(idx_info_src->layout->storage.u.chunk.idx_addr)); assert(idx_info_dst); assert(idx_info_dst->f); assert(idx_info_dst->pline); assert(idx_info_dst->layout); - assert(idx_info_dst->storage); /* Set copied metadata tag */ H5_BEGIN_TAG(H5AC__COPIED_TAG) diff --git a/src/H5Olayout.c b/src/H5Olayout.c index 9828bc39e51..2e44712be44 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -100,7 +100,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); mesg->version = *p++; - if (mesg->version < H5O_LAYOUT_VERSION_1 || mesg->version > H5O_LAYOUT_VERSION_4) + if (mesg->version < H5O_LAYOUT_VERSION_1 || mesg->version > H5O_LAYOUT_VERSION_LATEST) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad version number for layout message"); if (mesg->version < H5O_LAYOUT_VERSION_3) { @@ -946,7 +946,7 @@ H5O__layout_delete(H5F_t *f, H5O_t *open_oh, void *_mesg) case H5D_CHUNKED: /* Chunked blocks on disk */ /* Free the file space for the index & chunk raw data */ - if (H5D__chunk_delete(f, open_oh, &mesg->storage) < 0) + if (H5D__chunk_delete(f, open_oh, mesg) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free raw data"); break; @@ -1080,9 +1080,8 @@ H5O__layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst, bool H5_ (cpy_info->shared_fo && H5D__chunk_is_data_cached((const H5D_shared_t *)cpy_info->shared_fo))) { /* Create chunked layout */ - if (H5D__chunk_copy(file_src, &layout_src->storage.u.chunk, &layout_src->u.chunk, file_dst, - &layout_dst->storage.u.chunk, udata->src_space_extent, udata->src_dtype, - udata->common.src_pline, cpy_info) < 0) + if (H5D__chunk_copy(file_src, layout_src, file_dst, layout_dst, udata->src_space_extent, + udata->src_dtype, udata->common.src_pline, cpy_info) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, NULL, "unable to copy chunked storage"); } /* end if */ break; diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index ba9901e28e8..f5bffa61fe7 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -441,12 +441,18 @@ typedef struct H5O_efl_t { */ #define H5O_LAYOUT_VERSION_4 4 +/* This version uses a "size of lengths" size field to encode the sizes of + * filtered dataset chunks. This has a small file space penalty but prevents + * errors when a filter grows a dataset chunk. + */ +#define H5O_LAYOUT_VERSION_5 5 + /* The default version of the format. (Earlier versions had bugs) */ #define H5O_LAYOUT_VERSION_DEFAULT H5O_LAYOUT_VERSION_3 /* The latest version of the format. Look through the 'encode' * and 'size' callbacks for places to change when updating this. 
*/ -#define H5O_LAYOUT_VERSION_LATEST H5O_LAYOUT_VERSION_4 +#define H5O_LAYOUT_VERSION_LATEST H5O_LAYOUT_VERSION_5 /* Forward declaration of structs used below */ struct H5D_layout_ops_t; /* Defined in H5Dpkg.h */ diff --git a/test/dsets.c b/test/dsets.c index 9047973c95c..8f72b2ebae0 100644 --- a/test/dsets.c +++ b/test/dsets.c @@ -79,6 +79,7 @@ static const char *FILENAME[] = {"dataset", /* 0 */ "h5s_block", /* 27 */ "h5s_plist", /* 28 */ "vds_strings", /* 29 */ + "chunk_expand2", /* 30 */ NULL}; #define OHMIN_FILENAME_A "ohdr_min_a" @@ -187,6 +188,7 @@ static const char *FILENAME[] = {"dataset", /* 0 */ #define H5Z_FILTER_EXPAND 310 #define H5Z_FILTER_CAN_APPLY_TEST2 311 #define H5Z_FILTER_COUNT 312 +#define H5Z_FILTER_EXPAND2 313 /* Flags for testing filters */ #define DISABLE_FLETCHER32 0 @@ -262,11 +264,13 @@ static const char *FILENAME[] = {"dataset", /* 0 */ #define BYPASS_FILL_VALUE 7 /* Parameters for testing extensible array chunk indices */ -#define EARRAY_MAX_RANK 3 -#define EARRAY_DSET_DIM 15 -#define EARRAY_CHUNK_DIM 3 -#define EARRAY_EXTEND_INCR 15 -#define EARRAY_MAX_EXTEND 75 +#define EARRAY_MAX_RANK 2 +#define EARRAY_MAX_RANK_EXHAUST 3 +#define EARRAY_DSET_DIM 15 +#define EARRAY_CHUNK_DIM 3 +#define EARRAY_EXTEND_INCR 15 +#define EARRAY_MAX_EXTEND 30 +#define EARRAY_MAX_EXTEND_EXHAUST 75 /* Parameters for datasets in query storage size tests */ #define STORAGE_SIZE_DIM1 12 @@ -327,6 +331,8 @@ static size_t filter_corrupt(unsigned int flags, size_t cd_nelmts, const unsigne size_t nbytes, size_t *buf_size, void **buf); static size_t filter_expand(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf); +static size_t filter_expand2(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_values, + size_t nbytes, size_t *buf_size, void **buf); static size_t filter_count(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf); @@ -7686,7 +7692,7 @@ test_zero_dims(hid_t file) FAIL_STACK_ERROR; /* Verify index type */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { if (idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index"); } @@ -7769,7 +7775,7 @@ test_zero_dims(hid_t file) FAIL_STACK_ERROR; /* Verify index type */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { if (idx_type != H5D_CHUNK_IDX_BT2) FAIL_PUTS_ERROR("should be using v2 B-tree as index"); } @@ -7935,7 +7941,7 @@ test_missing_chunk(hid_t file) TEST_ERROR; /* Verify index type */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { if (idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using Extensible Array as index"); if (idx_type2 != H5D_CHUNK_IDX_BT2) @@ -8254,7 +8260,7 @@ test_random_chunks_real(const char *testname, bool early_alloc, hid_t fapl) TEST_ERROR; /* Verify index type */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { if (early_alloc) { if (idx_type != H5D_CHUNK_IDX_NONE) FAIL_PUTS_ERROR("should be using Non-Index as index"); @@ -8337,7 +8343,7 @@ test_random_chunks_real(const char *testname, bool early_alloc, hid_t fapl) TEST_ERROR; /* Verify index type */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { if (early_alloc) { if (idx_type != H5D_CHUNK_IDX_NONE) FAIL_PUTS_ERROR("should be using implicit indexing"); @@ -8473,7 +8479,7 @@ test_random_chunks_real(const char *testname, bool early_alloc, hid_t fapl) TEST_ERROR; /* Verify index type */ - if 
(low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { if (early_alloc) { if (idx_type != H5D_CHUNK_IDX_NONE) FAIL_PUTS_ERROR("should be using Non-Index as index"); @@ -9402,7 +9408,7 @@ test_big_chunks_bypass_cache(hid_t fapl) FAIL_STACK_ERROR; /* Chunk index type expected depends on whether we are using the latest version of the format */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { /* Verify index type */ if (idx_type != H5D_CHUNK_IDX_FARRAY) FAIL_PUTS_ERROR("should be using Fixed Array as index"); @@ -9647,22 +9653,30 @@ static herr_t test_chunk_fast(const char *driver_name, hid_t fapl) { char filename[FILENAME_BUF_SIZE]; - hid_t fid = H5I_INVALID_HID; /* File ID */ - hid_t my_fapl = H5I_INVALID_HID; /* File access property list ID */ - hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list ID */ - hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ - hid_t scalar_sid = H5I_INVALID_HID; /* Scalar dataspace ID */ - hid_t dsid = H5I_INVALID_HID; /* Dataset ID */ - hsize_t fill; /* Temporary value, for filling arrays */ - hsize_t hs_size[EARRAY_MAX_RANK]; /* Hyperslab size */ - hsize_t chunk_dim[EARRAY_MAX_RANK]; /* Chunk dimensions */ - H5F_libver_t low; /* File format low bound */ - unsigned swmr; /* Whether file should be written with SWMR access enabled */ + hid_t fid = H5I_INVALID_HID; /* File ID */ + hid_t my_fapl = H5I_INVALID_HID; /* File access property list ID */ + hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list ID */ + hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ + hid_t scalar_sid = H5I_INVALID_HID; /* Scalar dataspace ID */ + hid_t dsid = H5I_INVALID_HID; /* Dataset ID */ + hsize_t fill; /* Temporary value, for filling arrays */ + hsize_t hs_size[EARRAY_MAX_RANK_EXHAUST]; /* Hyperslab size */ + hsize_t chunk_dim[EARRAY_MAX_RANK_EXHAUST]; /* Chunk dimensions */ + H5F_libver_t low; /* File format low bound */ + unsigned swmr; /* Whether file should be written with SWMR access enabled */ + unsigned max_rank; /* Maximum rank */ + int express_test; /* Express test setting */ TESTING("datasets w/extensible array as chunk index"); + assert(EARRAY_MAX_RANK_EXHAUST >= EARRAY_MAX_RANK); + h5_fixname(FILENAME[10], fapl, filename, sizeof filename); + /* Set max rank based on express_test setting */ + express_test = h5_get_testexpress(); + max_rank = express_test <= H5_TEST_EXPRESS_EXHAUSTIVE ? 
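The repeated test edit above — replacing the equality check against H5F_LIBVER_LATEST with an ordered comparison — works because H5F_libver_t values are ordered, so any low bound of 1.10 or newer selects the newer chunk indexes, not only the LATEST setting. A minimal check using only public calls (a sketch, not test-suite code):

#include "hdf5.h"

/* Returns 1 if the FAPL's low bound implies the post-1.10 chunk indexes,
 * 0 if not, -1 on error. */
static int
uses_new_chunk_indexes(hid_t fapl)
{
    H5F_libver_t low, high;

    if (H5Pget_libver_bounds(fapl, &low, &high) < 0)
        return -1;
    return (low >= H5F_LIBVER_V110) ? 1 : 0;
}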
EARRAY_MAX_RANK_EXHAUST : EARRAY_MAX_RANK; + /* Copy the file access property list */ if ((my_fapl = H5Pcopy(fapl)) < 0) FAIL_STACK_ERROR; @@ -9691,11 +9705,11 @@ test_chunk_fast(const char *driver_name, hid_t fapl) /* Initialize chunk dimensions */ fill = EARRAY_CHUNK_DIM; - H5VM_array_fill(chunk_dim, &fill, sizeof(fill), EARRAY_MAX_RANK); + H5VM_array_fill(chunk_dim, &fill, sizeof(fill), max_rank); /* Initialize hyperslab size */ fill = 1; - H5VM_array_fill(hs_size, &fill, sizeof(fill), EARRAY_MAX_RANK); + H5VM_array_fill(hs_size, &fill, sizeof(fill), max_rank); /* Loop over using SWMR access to write */ for (swmr = 0; swmr <= 1; swmr++) { @@ -9725,7 +9739,7 @@ test_chunk_fast(const char *driver_name, hid_t fapl) unsigned ndims; /* Current # of dims to test */ /* Loop over dataspace ranks to test */ - for (ndims = 1; ndims < (EARRAY_MAX_RANK + 1); ndims++) { + for (ndims = 1; ndims < (max_rank + 1); ndims++) { unsigned unlim_dim; /* Create dataset creation property list */ @@ -9753,16 +9767,17 @@ test_chunk_fast(const char *driver_name, hid_t fapl) /* Loop over which dimension is unlimited */ for (unlim_dim = 0; unlim_dim < ndims; unlim_dim++) { - H5D_chunk_index_t idx_type; /* Dataset chunk index type */ - hsize_t dim[EARRAY_MAX_RANK], max_dim[EARRAY_MAX_RANK]; /* Dataset dimensions */ - hsize_t swizzled_dim[EARRAY_MAX_RANK]; /* Dimensions, with unlimited dimension moved - to rank 0 */ - hsize_t down[EARRAY_MAX_RANK]; /* 'down' sizes, for computing array index */ - hsize_t hs_offset[EARRAY_MAX_RANK]; /* Hyperslab offset */ - hssize_t snpoints; /* # of points in dataspace extent (signed) */ - hsize_t npoints; /* # of points in dataspace extent */ - unsigned write_elem, read_elem; /* Element written/read */ - hsize_t u; /* Local index variable */ + H5D_chunk_index_t idx_type; /* Dataset chunk index type */ + hsize_t dim[EARRAY_MAX_RANK_EXHAUST], + max_dim[EARRAY_MAX_RANK_EXHAUST]; /* Dataset dimensions */ + hsize_t swizzled_dim[EARRAY_MAX_RANK_EXHAUST]; /* Dimensions, with unlimited dimension + moved to rank 0 */ + hsize_t down[EARRAY_MAX_RANK_EXHAUST]; /* 'down' sizes, for computing array index */ + hsize_t hs_offset[EARRAY_MAX_RANK_EXHAUST]; /* Hyperslab offset */ + hssize_t snpoints; /* # of points in dataspace extent (signed) */ + hsize_t npoints; /* # of points in dataspace extent */ + unsigned write_elem, read_elem; /* Element written/read */ + hsize_t u; /* Local index variable */ /* Create file */ if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC | (swmr ? 
H5F_ACC_SWMR_WRITE : 0), @@ -9771,12 +9786,12 @@ test_chunk_fast(const char *driver_name, hid_t fapl) /* Create n-D dataspace */ fill = EARRAY_DSET_DIM; - H5VM_array_fill(dim, &fill, sizeof(fill), EARRAY_MAX_RANK); + H5VM_array_fill(dim, &fill, sizeof(fill), max_rank); fill = EARRAY_DSET_DIM; - H5VM_array_fill(max_dim, &fill, sizeof(fill), EARRAY_MAX_RANK); + H5VM_array_fill(max_dim, &fill, sizeof(fill), max_rank); max_dim[unlim_dim] = H5S_UNLIMITED; fill = EARRAY_DSET_DIM; - H5VM_array_fill(swizzled_dim, &fill, sizeof(fill), EARRAY_MAX_RANK); + H5VM_array_fill(swizzled_dim, &fill, sizeof(fill), max_rank); if ((sid = H5Screate_simple((int)ndims, dim, max_dim)) < 0) FAIL_STACK_ERROR; @@ -9799,7 +9814,7 @@ test_chunk_fast(const char *driver_name, hid_t fapl) /* Chunk index type expected depends on whether we are using the latest version of the * format */ - if (low == H5F_LIBVER_LATEST || swmr) { + if (low >= H5F_LIBVER_V110 || swmr) { /* Verify index type */ if (idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index"); @@ -9849,7 +9864,9 @@ test_chunk_fast(const char *driver_name, hid_t fapl) } /* end for */ /* Incrementally extend dataset and verify write/reads */ - while (dim[unlim_dim] < EARRAY_MAX_EXTEND) { + while (dim[unlim_dim] < (hsize_t)(express_test <= H5_TEST_EXPRESS_EXHAUSTIVE + ? EARRAY_MAX_EXTEND_EXHAUST + : EARRAY_MAX_EXTEND)) { hssize_t snew_npoints; /* # of points in dataspace extent (signed) */ hsize_t new_npoints; /* # of points in dataspace extent */ @@ -9940,7 +9957,7 @@ test_chunk_fast(const char *driver_name, hid_t fapl) /* Chunk index tyepe expected depends on whether we are using the latest version of * the format */ - if (low == H5F_LIBVER_LATEST || swmr) { + if (low >= H5F_LIBVER_V110 || swmr) { /* Verify index type */ if (idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index"); @@ -10425,70 +10442,70 @@ test_chunk_expand(hid_t fapl) /* Check if we are using the latest version of the format */ if (H5Pget_libver_bounds(fapl, &low, &high) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; - if (sizeof(size_t) <= 4 && low != H5F_LIBVER_LATEST) { + if (sizeof(size_t) <= 4 && low < H5F_LIBVER_V110) { SKIPPED(); puts(" Current machine can't test for error w/old file format"); } /* end if */ else { /* Register "expansion" filter */ if (H5Zregister(H5Z_EXPAND) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Check that the filter was registered */ if (true != H5Zfilter_avail(H5Z_FILTER_EXPAND)) - FAIL_STACK_ERROR; + TEST_ERROR; /* Loop over storage allocation time */ for (alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) { /* Create file */ if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create dataset creation property list */ if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl2 = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set chunking */ chunk_dim = chunk_dim2[0] = chunk_dim2[1] = 10; if (H5Pset_chunk(dcpl, 1, &chunk_dim) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_chunk(dcpl2, 2, chunk_dim2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set fill time */ if (H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_fill_time(dcpl2, H5D_FILL_TIME_ALLOC) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set allocation time */ if (H5Pset_alloc_time(dcpl, alloc_time) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if 
(H5Pset_alloc_time(dcpl2, alloc_time) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set "expand" filter */ if (H5Pset_filter(dcpl, H5Z_FILTER_EXPAND, 0, (size_t)0, NULL) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_filter(dcpl2, H5Z_FILTER_EXPAND, 0, (size_t)0, NULL) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create scalar dataspace */ if ((scalar_sid = H5Screate(H5S_SCALAR)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create 1-D and 2-D dataspace */ dim = dim2[0] = dim2[1] = 100; max_dim = max_dim2[0] = max_dim2[1] = H5S_UNLIMITED; if ((sid = H5Screate_simple(1, &dim, &max_dim)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((sid2 = H5Screate_simple(2, dim2, max_dim2)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create 1-D & 2-D chunked datasets */ if (H5D_ALLOC_TIME_EARLY == alloc_time) { @@ -10515,21 +10532,21 @@ test_chunk_expand(hid_t fapl) else { if ((dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dsid2 = H5Dcreate2(fid, "dset2", H5T_NATIVE_UINT, sid2, H5P_DEFAULT, dcpl2, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Get the chunk index type */ if (H5D__layout_idx_type_test(dsid, &idx_type) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5D__layout_idx_type_test(dsid2, &idx_type2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Chunk index type expected depends on whether we are using the latest version of the format */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { /* Verify index type */ if (idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index"); @@ -10552,18 +10569,18 @@ test_chunk_expand(hid_t fapl) /* Select a single element in the 1-D dataset */ if (H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Select a single element in the 2-D dataset; NOT every element is selected */ if (H5Sselect_hyperslab(sid2, H5S_SELECT_SET, hs_offset2, NULL, hs_size2, NULL) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Read (unwritten) element from dataset */ read_elem = read_elem2 = 1; if (H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify unwritten element is fill value (0) */ if (read_elem != 0) @@ -10577,17 +10594,17 @@ test_chunk_expand(hid_t fapl) /* Write element to the datasets */ write_elem = write_elem2 = u; if (H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dwrite(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &write_elem2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Read element from the datasets */ read_elem = write_elem + 1; read_elem2 = write_elem2 + 1; if (H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify written element is read in */ if (read_elem != write_elem) @@ -10625,21 +10642,21 @@ test_chunk_expand(hid_t fapl) dim2[0] += 100; dim2[1] += 100; if (H5Dset_extent(dsid, &dim) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dset_extent(dsid2, dim2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Close old dataspace */ if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(sid2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Get 
dataspace for the datasets now */ if ((sid = H5Dget_space(dsid)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((sid2 = H5Dget_space(dsid2)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Fill new elements */ hs_size = hs_size2[0] = hs_size2[1] = 1; @@ -10649,16 +10666,16 @@ test_chunk_expand(hid_t fapl) hs_offset2[0] = (dim2[0] + u) - 100; hs_offset2[1] = (dim2[1] + u) - 100; if (H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sselect_hyperslab(sid2, H5S_SELECT_SET, hs_offset2, NULL, hs_size2, NULL) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Read (unwritten) element from the datasets */ read_elem = read_elem2 = 1; if (H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify unwritten element is fill value (0) */ if (read_elem != 0) @@ -10672,17 +10689,17 @@ test_chunk_expand(hid_t fapl) /* Write element to the datasets */ write_elem = write_elem2 = u; if (H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dwrite(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &write_elem2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Read element from the datasets */ read_elem = write_elem + 1; read_elem2 = write_elem2 + 1; if (H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify written element is read in */ if (read_elem != write_elem) @@ -10719,46 +10736,46 @@ test_chunk_expand(hid_t fapl) /* Close the datasets */ if (H5Dclose(dsid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dsid2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* end else */ /* Close everything */ if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(sid2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(scalar_sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Fclose(fid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* If the dataset was created, do some extra testing */ if (H5D_ALLOC_TIME_EARLY != alloc_time) { /* Re-open file & datasets */ if ((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Open the datasets */ if ((dsid = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dsid2 = H5Dopen2(fid, "dset2", H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Get the chunk index type for the two datasets */ if (H5D__layout_idx_type_test(dsid, &idx_type) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5D__layout_idx_type_test(dsid2, &idx_type2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Chunk index type expected depends on whether we are using the latest version of the format */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { /* Verify index type */ if (idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index"); @@ -10775,13 +10792,13 @@ test_chunk_expand(hid_t fapl) /* Create scalar dataspace */ if ((scalar_sid = H5Screate(H5S_SCALAR)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Get dataspace for the datasets now */ if ((sid = H5Dget_space(dsid)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if 
((sid2 = H5Dget_space(dsid2)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Read elements */ hs_size = hs_size2[0] = hs_size2[1] = 1; @@ -10789,16 +10806,16 @@ test_chunk_expand(hid_t fapl) /* Select a single element in the datasets */ hs_offset = hs_offset2[0] = hs_offset2[1] = u; if (H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sselect_hyperslab(sid2, H5S_SELECT_SET, hs_offset2, NULL, hs_size2, NULL) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Read element from the datasets */ read_elem = read_elem2 = u + 1; if (H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify unwritten element is proper value */ if (read_elem != (u % 100)) @@ -10812,17 +10829,17 @@ test_chunk_expand(hid_t fapl) /* Write element to the datasets */ write_elem = write_elem2 = u % 100; if (H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dwrite(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &write_elem2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Read element from the datasets */ read_elem = write_elem + 1; read_elem2 = write_elem2 + 1; if (H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify written element is read in */ if (read_elem != write_elem) @@ -10855,41 +10872,41 @@ test_chunk_expand(hid_t fapl) /* Close everything */ if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(sid2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(scalar_sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dsid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dsid2) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Fclose(fid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Re-open file */ if ((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Delete the datasets */ if (H5Ldelete(fid, "dset", H5P_DEFAULT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Ldelete(fid, "dset2", H5P_DEFAULT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Close everything */ if (H5Fclose(fid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* end if */ } /* end for */ /* Unregister "expansion" filter */ if (H5Zunregister(H5Z_FILTER_EXPAND) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Check that the filter was unregistered */ if (false != H5Zfilter_avail(H5Z_FILTER_EXPAND)) - FAIL_STACK_ERROR; + TEST_ERROR; PASSED(); } /* end else */ @@ -10907,11 +10924,431 @@ test_chunk_expand(hid_t fapl) H5Sclose(sid2); H5Sclose(scalar_sid); H5Fclose(fid); + H5Zunregister(H5Z_FILTER_EXPAND); } H5E_END_TRY return FAIL; } /* end test_chunk_expand() */ +/* This message derives from H5Z */ +const H5Z_class2_t H5Z_EXPAND2[1] = {{ + H5Z_CLASS_T_VERS, /* H5Z_class_t version */ + H5Z_FILTER_EXPAND2, /* Filter id number */ + 1, 1, /* Encoding and decoding enabled */ + "expand2", /* Filter name for debugging */ + NULL, /* The "can apply" callback */ + NULL, /* The "set local" callback */ + filter_expand2, /* The actual filter function */ +}}; + +/* "Expansion factor" for filter_expand() routine */ +#define FILTER_EXPAND2_FACTOR (256 * 256) + +/*------------------------------------------------------------------------- + * Function: 
filter_expand2 + * + * Purpose: For testing library's behavior when a filter expands a + * chunk by an amount that would be too much for the old file + * format. + * + * Note: Unlike filter_expand, this filter does actually allocate a + * new buffer and is fully functional. Care should be taken + * to ensure memory and file usage stays under control. + * + * Return: Success: Data chunk size + * Failure: 0 + * + *------------------------------------------------------------------------- + */ +static size_t +filter_expand2(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts, + const unsigned int H5_ATTR_UNUSED *cd_values, size_t nbytes, size_t *buf_size, void **buf) +{ + size_t ret_value = 0; + + if (flags & H5Z_FLAG_REVERSE) + /* Shrink nbytes by a factor of FILTER_EXPAND2_FACTOR */ + ret_value = nbytes / FILTER_EXPAND2_FACTOR; + else { + void *new_buf = NULL; + const size_t new_nbytes = nbytes * FILTER_EXPAND2_FACTOR; + + /* Expand the buffer size beyond what can be encoded. To do this first calloc a new buffer then copy + * the used bytes over, so we don't get warnings about writing uninitialized bytes to disk */ + if (*buf_size < new_nbytes) { + if (NULL == (new_buf = calloc(new_nbytes, 1))) + return 0; + memcpy(new_buf, *buf, nbytes); + free(*buf); + *buf = new_buf; + new_buf = NULL; + *buf_size = new_nbytes; + } + + ret_value = new_nbytes; + } + + return ret_value; +} /* end filter_expand2() */ + +/*------------------------------------------------------------------------- + * Function: test_chunk_expand2 + * + * Purpose: Tests that, when using the new file format (2.0+), filters + * can expand a chunk by an amount that would be too much for + * the old file format. + * + * Return: Success: 0 + * Failure: -1 + * + *------------------------------------------------------------------------- + */ +static herr_t +test_chunk_expand2(hid_t fapl) +{ + char filename[FILENAME_BUF_SIZE]; + hid_t fid = H5I_INVALID_HID; /* File ID */ + hid_t dcpl[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, + H5I_INVALID_HID}; /* Dataset creation property list IDs */ + hid_t sid[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; /* Dataspace IDs */ + hid_t scalar_sid = H5I_INVALID_HID; /* Scalar dataspace ID */ + hid_t dsid[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; /* Dataset ID */ + hsize_t dim[2], max_dim[2], chunk_dim[2]; /* Dataset and chunk dimensions */ + H5D_chunk_index_t idx_type; /* Dataset chunk index type */ + H5F_libver_t low, high; /* File format bounds */ + hsize_t hs_offset[2]; /* Hyperslab offset */ + hsize_t hs_size[2]; /* Hyperslab size */ + H5D_alloc_time_t alloc_time; /* Storage allocation time */ + unsigned write_elem, read_elem; /* Element written/read */ + unsigned i, u; /* Local index variables */ + + TESTING("filter expanding chunks too much succeeds with new format"); + + h5_fixname(FILENAME[30], fapl, filename, sizeof filename); + + /* Check if we are using the latest version of the format */ + if (H5Pget_libver_bounds(fapl, &low, &high) < 0) + TEST_ERROR; + assert(low >= H5F_LIBVER_V200); + + /* Register "expansion 2" filter */ + if (H5Zregister(H5Z_EXPAND2) < 0) + TEST_ERROR; + + /* Check that the filter was registered */ + if (true != H5Zfilter_avail(H5Z_FILTER_EXPAND2)) + TEST_ERROR; + + /* Loop over storage allocation time */ + for (alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) { + + /* Create file */ + if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + 
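/*
 * Illustrative sketch only (not part of this patch; the filter id, name, and
 * helper below are hypothetical): the general user-defined filter pattern that
 * H5Z_EXPAND2 above follows -- describe the filter with an H5Z_class2_t,
 * register it with H5Zregister(), and attach it to a chunked dataset creation
 * property list with H5Pset_filter() so it runs on every chunk written or read.
 */
#include "hdf5.h"

#define MY_FILTER_ID ((H5Z_filter_t)512) /* hypothetical id above H5Z_FILTER_RESERVED */

static size_t
my_filter(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_values,
          size_t nbytes, size_t *buf_size, void **buf)
{
    (void)flags; (void)cd_nelmts; (void)cd_values; (void)buf_size; (void)buf;

    /* Pass-through filter: report the number of valid bytes in *buf for both
     * the forward and the reverse (H5Z_FLAG_REVERSE) direction */
    return nbytes;
}

static const H5Z_class2_t MY_FILTER_CLASS[1] = {{
    H5Z_CLASS_T_VERS, /* H5Z_class_t version */
    MY_FILTER_ID,     /* Filter id number */
    1, 1,             /* Encoding and decoding enabled */
    "my_filter",      /* Filter name for debugging */
    NULL,             /* The "can apply" callback */
    NULL,             /* The "set local" callback */
    my_filter,        /* The actual filter function */
}};

static herr_t
create_filtered_dset(hid_t fid)
{
    hsize_t dim[2] = {4, 4}, chunk_dim[2] = {2, 2};
    hid_t   sid = H5I_INVALID_HID, dcpl = H5I_INVALID_HID, did = H5I_INVALID_HID;
    herr_t  ret = -1;

    if (H5Zregister(MY_FILTER_CLASS) < 0)
        goto done;
    if ((sid = H5Screate_simple(2, dim, NULL)) < 0)
        goto done;
    if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        goto done;
    if (H5Pset_chunk(dcpl, 2, chunk_dim) < 0)
        goto done;
    if (H5Pset_filter(dcpl, MY_FILTER_ID, H5Z_FLAG_MANDATORY, (size_t)0, NULL) < 0)
        goto done;
    if ((did = H5Dcreate2(fid, "filtered_dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        goto done;
    ret = 0;

done:
    if (did >= 0)
        H5Dclose(did);
    if (dcpl >= 0)
        H5Pclose(dcpl);
    if (sid >= 0)
        H5Sclose(sid);
    return ret;
}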
TEST_ERROR; + + /* Create dataset creation property lists */ + for (i = 0; i < sizeof(dcpl) / sizeof(dcpl[0]); i++) + if ((dcpl[i] = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + /* Set chunking */ + chunk_dim[0] = chunk_dim[1] = 2; + for (i = 0; i < sizeof(dcpl) / sizeof(dcpl[0]); i++) + if (H5Pset_chunk(dcpl[i], 2, chunk_dim) < 0) + TEST_ERROR; + + /* Set fill time */ + for (i = 0; i < sizeof(dcpl) / sizeof(dcpl[0]); i++) + if (H5Pset_fill_time(dcpl[i], H5D_FILL_TIME_ALLOC) < 0) + TEST_ERROR; + + /* Set allocation time */ + for (i = 0; i < sizeof(dcpl) / sizeof(dcpl[0]); i++) + if (H5Pset_alloc_time(dcpl[i], alloc_time) < 0) + TEST_ERROR; + + /* Set "expand" filter */ + for (i = 0; i < sizeof(dcpl) / sizeof(dcpl[0]); i++) + if (H5Pset_filter(dcpl[i], H5Z_FILTER_EXPAND2, 0, (size_t)0, NULL) < 0) + TEST_ERROR; + + /* Create scalar dataspace */ + if ((scalar_sid = H5Screate(H5S_SCALAR)) < 0) + TEST_ERROR; + + /* Create single chunk dataspace */ + dim[0] = dim[1] = 2; + max_dim[0] = max_dim[1] = 2; + if ((sid[0] = H5Screate_simple(2, dim, max_dim)) < 0) + TEST_ERROR; + + /* Create fixed array dataspace */ + dim[0] = dim[1] = 4; + max_dim[0] = max_dim[1] = 12; + if ((sid[1] = H5Screate_simple(2, dim, max_dim)) < 0) + TEST_ERROR; + + /* Create extensible array dataspace */ + max_dim[0] = H5S_UNLIMITED; + max_dim[1] = 12; + if ((sid[2] = H5Screate_simple(2, dim, max_dim)) < 0) + TEST_ERROR; + + /* Create v2 b-tree dataspace */ + max_dim[0] = H5S_UNLIMITED; + max_dim[1] = H5S_UNLIMITED; + if ((sid[3] = H5Screate_simple(2, dim, max_dim)) < 0) + TEST_ERROR; + + /* Create chunked datasets */ + for (i = 0; i < sizeof(dsid) / sizeof(dsid[0]); i++) { + char dsetname[8]; + + snprintf(dsetname, sizeof(dsetname), "dset%u", i); + if ((dsid[i] = H5Dcreate2(fid, dsetname, H5T_NATIVE_UINT, sid[i], H5P_DEFAULT, dcpl[i], + H5P_DEFAULT)) < 0) + TEST_ERROR; + } + + /* Verify layout types */ + if (H5D__layout_idx_type_test(dsid[0], &idx_type) < 0) + TEST_ERROR; + if (idx_type != H5D_CHUNK_IDX_SINGLE) + FAIL_PUTS_ERROR("should be using single chunk as index"); + if (H5D__layout_idx_type_test(dsid[1], &idx_type) < 0) + TEST_ERROR; + if (idx_type != H5D_CHUNK_IDX_FARRAY) + FAIL_PUTS_ERROR("should be using fixed array as index"); + if (H5D__layout_idx_type_test(dsid[2], &idx_type) < 0) + TEST_ERROR; + if (idx_type != H5D_CHUNK_IDX_EARRAY) + FAIL_PUTS_ERROR("should be using extensible array as index"); + if (H5D__layout_idx_type_test(dsid[3], &idx_type) < 0) + TEST_ERROR; + if (idx_type != H5D_CHUNK_IDX_BT2) + FAIL_PUTS_ERROR("should be using v2 B-tree as index"); + + /* + * Fill elements + */ + hs_size[0] = hs_size[1] = 1; + + /* Iterate over datasets */ + for (i = 0; i < sizeof(dsid) / sizeof(dsid[0]); i++) { + /* Iterate over elements */ + for (u = 0; u < 4; u++) { + /* Don't read past end of single chunk dataset */ + if (i == 0 && u >= 2) + break; + + hs_offset[0] = hs_offset[1] = u; + + /* Select a single element in the 2-D dataset; NOT every element is selected */ + if (H5Sselect_hyperslab(sid[i], H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL) < 0) + TEST_ERROR; + + /* Read (unwritten) element from dataset */ + read_elem = 1; + if (H5Dread(dsid[i], H5T_NATIVE_UINT, scalar_sid, sid[i], H5P_DEFAULT, &read_elem) < 0) + TEST_ERROR; + + /* Verify unwritten element is fill value (0) */ + if (read_elem != 0) + FAIL_PUTS_ERROR("invalid unwritten element read"); + + /* Write element to the dataset */ + write_elem = u; + if (H5Dwrite(dsid[i], H5T_NATIVE_UINT, scalar_sid, sid[i], H5P_DEFAULT, &write_elem) < 0) + 
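/*
 * Sketch of the single-element I/O idiom used throughout these tests
 * (hypothetical helper and handles, not taken from this patch): a scalar
 * dataspace describes the one element in memory, while a 1x1 hyperslab
 * selection on the dataset's dataspace picks the target element in the file.
 */
#include "hdf5.h"

static herr_t
write_one_element(hid_t did, hsize_t row, hsize_t col, unsigned value)
{
    hsize_t offset[2] = {row, col};
    hsize_t count[2]  = {1, 1};
    hid_t   mem_sid   = H5I_INVALID_HID;
    hid_t   file_sid  = H5I_INVALID_HID;
    herr_t  ret       = -1;

    /* One element in memory */
    if ((mem_sid = H5Screate(H5S_SCALAR)) < 0)
        goto done;

    /* One element in the file, at (row, col) */
    if ((file_sid = H5Dget_space(did)) < 0)
        goto done;
    if (H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, offset, NULL, count, NULL) < 0)
        goto done;

    if (H5Dwrite(did, H5T_NATIVE_UINT, mem_sid, file_sid, H5P_DEFAULT, &value) < 0)
        goto done;
    ret = 0;

done:
    if (file_sid >= 0)
        H5Sclose(file_sid);
    if (mem_sid >= 0)
        H5Sclose(mem_sid);
    return ret;
}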
TEST_ERROR; + + /* Read element from the dataset */ + read_elem = write_elem + 1; + if (H5Dread(dsid[i], H5T_NATIVE_UINT, scalar_sid, sid[i], H5P_DEFAULT, &read_elem) < 0) + TEST_ERROR; + + /* Verify written element is read in */ + if (read_elem != write_elem) + FAIL_PUTS_ERROR("invalid written element read"); + } + } + + /* Iterate over datasets. Do not expand single chunk dataset. */ + for (i = 1; i < sizeof(dsid) / sizeof(dsid[0]); i++) { + dim[0] = dim[1] = 4; + + /* Incrementally extend dataset and verify write/reads */ + while (dim[0] < 12) { + /* Extend the datasets */ + dim[0] += 4; + dim[1] += 4; + if (H5Dset_extent(dsid[i], dim) < 0) + TEST_ERROR; + + /* Close old dataspace */ + if (H5Sclose(sid[i]) < 0) + TEST_ERROR; + + /* Get dataspace for the datasets now */ + if ((sid[i] = H5Dget_space(dsid[i])) < 0) + TEST_ERROR; + + /* Fill new elements */ + hs_size[0] = hs_size[1] = 1; + for (u = 0; u < 4; u++) { + /* Select a single element in the dataset */ + hs_offset[0] = (dim[0] + u) - 4; + hs_offset[1] = (dim[1] + u) - 4; + if (H5Sselect_hyperslab(sid[i], H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL) < 0) + TEST_ERROR; + + /* Read (unwritten) element from the dataset */ + read_elem = 1; + if (H5Dread(dsid[i], H5T_NATIVE_UINT, scalar_sid, sid[i], H5P_DEFAULT, &read_elem) < 0) + TEST_ERROR; + + /* Verify unwritten element is fill value (0) */ + if (read_elem != 0) + FAIL_PUTS_ERROR("invalid unwritten element read"); + + /* Write element to the datasets */ + write_elem = u; + if (H5Dwrite(dsid[i], H5T_NATIVE_UINT, scalar_sid, sid[i], H5P_DEFAULT, &write_elem) < 0) + TEST_ERROR; + + /* Read element from the datasets */ + read_elem = write_elem + 1; + if (H5Dread(dsid[i], H5T_NATIVE_UINT, scalar_sid, sid[i], H5P_DEFAULT, &read_elem) < 0) + TEST_ERROR; + + /* Verify written element is read in */ + if (read_elem != write_elem) + FAIL_PUTS_ERROR("invalid written element read"); + } + } + } + + /* Close the datasets, dcpls, dataspaces, and file */ + for (i = 0; i < sizeof(dsid) / sizeof(dsid[0]); i++) + if (H5Dclose(dsid[i]) < 0) + TEST_ERROR; + for (i = 0; i < sizeof(sid) / sizeof(sid[0]); i++) + if (H5Sclose(sid[i]) < 0) + TEST_ERROR; + for (i = 0; i < sizeof(dcpl) / sizeof(dcpl[0]); i++) + if (H5Pclose(dcpl[i]) < 0) + TEST_ERROR; + if (H5Fclose(fid) < 0) + TEST_ERROR; + + /* Re-open file */ + if ((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + TEST_ERROR; + + /* Iterate over datasets */ + for (i = 0; i < sizeof(dsid) / sizeof(dsid[0]); i++) { + char dsetname[8]; + + /* Open the dataset */ + snprintf(dsetname, sizeof(dsetname), "dset%u", i); + if ((dsid[i] = H5Dopen2(fid, dsetname, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Get dataspace for the dataset now */ + if ((sid[i] = H5Dget_space(dsid[i])) < 0) + TEST_ERROR; + + /* Read elements */ + hs_size[0] = hs_size[1] = 1; + for (u = 0; u < (i == 0 ? 
2 : 12); u++) { + /* Select a single element in the datasets */ + hs_offset[0] = hs_offset[1] = u; + if (H5Sselect_hyperslab(sid[i], H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL) < 0) + TEST_ERROR; + + /* Read element from the datasets */ + read_elem = u + 1; + if (H5Dread(dsid[i], H5T_NATIVE_UINT, scalar_sid, sid[i], H5P_DEFAULT, &read_elem) < 0) + TEST_ERROR; + + /* Verify unwritten element is proper value */ + if (read_elem != (u % 4)) + FAIL_PUTS_ERROR("invalid element read"); + + /* Write element to the datasets */ + write_elem = u % 4 + 1; + if (H5Dwrite(dsid[i], H5T_NATIVE_UINT, scalar_sid, sid[i], H5P_DEFAULT, &write_elem) < 0) + TEST_ERROR; + + /* Read element from the datasets */ + read_elem = write_elem + 1; + if (H5Dread(dsid[i], H5T_NATIVE_UINT, scalar_sid, sid[i], H5P_DEFAULT, &read_elem) < 0) + TEST_ERROR; + + /* Verify written element is read in */ + if (read_elem != write_elem) + FAIL_PUTS_ERROR("invalid written element read"); + } + } + + /* Close everything */ + for (i = 0; i < sizeof(dsid) / sizeof(dsid[0]); i++) + if (H5Dclose(dsid[i]) < 0) + TEST_ERROR; + for (i = 0; i < sizeof(sid) / sizeof(sid[0]); i++) + if (H5Sclose(sid[i]) < 0) + TEST_ERROR; + if (H5Fclose(fid) < 0) + TEST_ERROR; + if (H5Sclose(scalar_sid) < 0) + TEST_ERROR; + + /* Re-open file */ + if ((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + TEST_ERROR; + + /* Delete the datasets */ + for (i = 0; i < sizeof(dsid) / sizeof(dsid[0]); i++) { + char dsetname[8]; + + snprintf(dsetname, sizeof(dsetname), "dset%u", i); + if (H5Ldelete(fid, dsetname, H5P_DEFAULT) < 0) + TEST_ERROR; + } + + /* Close file */ + if (H5Fclose(fid) < 0) + TEST_ERROR; + } + + /* Unregister "expansion 2" filter */ + if (H5Zunregister(H5Z_FILTER_EXPAND2) < 0) + TEST_ERROR; + + /* Check that the filter was unregistered */ + if (false != H5Zfilter_avail(H5Z_FILTER_EXPAND2)) + TEST_ERROR; + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + for (i = 0; i < sizeof(dsid) / sizeof(dsid[0]); i++) + H5Dclose(dsid[i]); + for (i = 0; i < sizeof(sid) / sizeof(sid[0]); i++) + H5Sclose(sid[i]); + for (i = 0; i < sizeof(dcpl) / sizeof(dcpl[0]); i++) + H5Pclose(dcpl[i]); + H5Sclose(scalar_sid); + H5Fclose(fid); + H5Zunregister(H5Z_FILTER_EXPAND2); + } + H5E_END_TRY + return FAIL; +} /* end test_chunk_expand2() */ + /*------------------------------------------------------------------------- * Function: test_fixed_array * @@ -11123,7 +11560,7 @@ test_fixed_array(hid_t fapl) FAIL_STACK_ERROR; /* Chunk index type depends on whether we are using the latest version of the format */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { if (alloc_time == H5D_ALLOC_TIME_EARLY #ifdef H5_HAVE_FILTER_DEFLATE && !compress @@ -11172,7 +11609,7 @@ test_fixed_array(hid_t fapl) FAIL_STACK_ERROR; /* Chunk index type depends on whether we are using the latest version of the format */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { if (alloc_time == H5D_ALLOC_TIME_EARLY #ifdef H5_HAVE_FILTER_DEFLATE && !compress @@ -11222,7 +11659,7 @@ test_fixed_array(hid_t fapl) FAIL_STACK_ERROR; /* Chunk index type depends on whether we are using the latest version of the format */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { if (alloc_time == H5D_ALLOC_TIME_EARLY #ifdef H5_HAVE_FILTER_DEFLATE && !compress @@ -11601,7 +12038,7 @@ test_single_chunk(hid_t fapl) FAIL_STACK_ERROR; /* Chunk index type depends on whether we are using the latest version of the format */ - if (low == H5F_LIBVER_LATEST) { + if (low >= 
H5F_LIBVER_V110) { if (idx_type != H5D_CHUNK_IDX_SINGLE) FAIL_PUTS_ERROR("should be using Single Chunk indexing"); } /* end if */ @@ -11632,7 +12069,7 @@ test_single_chunk(hid_t fapl) FAIL_STACK_ERROR; /* Chunk index type depends on whether we are using the latest version of the format */ - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { if (idx_type != H5D_CHUNK_IDX_SINGLE) FAIL_PUTS_ERROR("should be using Single Chunk indexing"); } @@ -12247,7 +12684,7 @@ test_swmr_non_latest(const char *driver_name, hid_t fapl) h5_fixname(FILENAME[18], fapl, filename, sizeof filename); - if (low == H5F_LIBVER_LATEST) { + if (low >= H5F_LIBVER_V110) { /* Create file with write+latest-format */ if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR; @@ -15745,13 +16182,14 @@ test_0sized_dset_metadata_alloc(hid_t fapl_id) * *------------------------------------------------------------------------- */ -#define VLEN_DS_NAME "test_downsize_vlen_scalar_dataset" -#define VLEN_DS_DIM 100 -#define VLEN_DS_STRLEN 20 -#define VLEN_DS_STRING "vlen test string" -#define VLEN_DS_VALUE 0.12345678901234567890 -#define VLEN_DS_ARRAY_DIM1 3 -#define VLEN_DS_ARRAY_DIM2 5 +#define VLEN_DS_NAME "test_downsize_vlen_scalar_dataset" +#define VLEN_DS_DIM 10 +#define VLEN_DS_DIM_EXHAUST 100 +#define VLEN_DS_STRLEN 20 +#define VLEN_DS_STRING "vlen test string" +#define VLEN_DS_VALUE 0.12345678901234567890 +#define VLEN_DS_ARRAY_DIM1 3 +#define VLEN_DS_ARRAY_DIM2 5 typedef struct { double value; @@ -15781,13 +16219,19 @@ test_downsize_vlen_scalar_dataset(hid_t file) vlen_ds_compound_memory_t *compound_data = NULL; /* Contents of VLEN data */ char common_string[VLEN_DS_STRLEN]; /* Common string contents */ hsize_t array_dims[2] = {VLEN_DS_ARRAY_DIM1, VLEN_DS_ARRAY_DIM2}; + int ds_dim; /* Initial dataset size */ + int express_test; /* Express test setting */ int i, dim1, dim2; /* Local index variables */ TESTING("H5Dwrite() on down-sized VLEN contents"); + /* Set dataset initial size based on express_test setting */ + express_test = h5_get_testexpress(); + ds_dim = express_test <= H5_TEST_EXPRESS_EXHAUSTIVE ? VLEN_DS_DIM_EXHAUST : VLEN_DS_DIM; + /* Allocate space for compound data */ if (NULL == (compound_data = - (vlen_ds_compound_memory_t *)malloc(VLEN_DS_DIM * sizeof(vlen_ds_compound_memory_t)))) + (vlen_ds_compound_memory_t *)malloc((size_t)ds_dim * sizeof(vlen_ds_compound_memory_t)))) TEST_ERROR; /* Create scalar dataspace */ @@ -15836,7 +16280,7 @@ test_downsize_vlen_scalar_dataset(hid_t file) */ strcpy(common_string, VLEN_DS_STRING); - for (i = 0; i < VLEN_DS_DIM; ++i) { + for (i = 0; i < ds_dim; ++i) { compound_data[i].value = VLEN_DS_VALUE; for (dim1 = 0; dim1 < VLEN_DS_ARRAY_DIM1; ++dim1) { for (dim2 = 0; dim2 < VLEN_DS_ARRAY_DIM2; ++dim2) { @@ -15852,7 +16296,7 @@ test_downsize_vlen_scalar_dataset(hid_t file) * arrays. */ /* Note: the bug in v1.8.14 is tripped on the second iteration, when 100 elements are over-written * with 99. 
*/ - for (i = VLEN_DS_DIM; i > 0; --i) { + for (i = ds_dim; i > 0; --i) { vlen_compound_data.len = (size_t)i; vlen_compound_data.p = compound_data; if (H5Dwrite(scalar_did, vlen_compound_memory_tid, scalar_sid, scalar_sid, H5P_DEFAULT, @@ -17137,21 +17581,21 @@ test_vds_shared_strings(hid_t fapl) int main(void) { - char filename[FILENAME_BUF_SIZE]; - hid_t file, grp, fapl, fapl2; - hid_t fcpl = H5I_INVALID_HID, fcpl2 = H5I_INVALID_HID; - unsigned new_format; - unsigned paged; - unsigned minimized_ohdr; - int mdc_nelmts; - size_t rdcc_nelmts; - size_t rdcc_nbytes; - double rdcc_w0; - int nerrors = 0; - const char *driver_name; - bool contig_addr_vfd; /* Whether VFD used has a contiguous address space */ - bool driver_is_default_compatible; - int i; + char filename[FILENAME_BUF_SIZE]; + hid_t file, grp, fapl; + hid_t fcpl = H5I_INVALID_HID, fcpl2 = H5I_INVALID_HID; + H5F_libver_t low; /* Low version bound */ + unsigned paged; + unsigned minimized_ohdr; + int mdc_nelmts; + size_t rdcc_nelmts; + size_t rdcc_nbytes; + double rdcc_w0; + int nerrors = 0; + const char *driver_name; + bool contig_addr_vfd; /* Whether VFD used has a contiguous address space */ + bool driver_is_default_compatible; + int i; /* Don't run this test using certain file drivers */ driver_name = h5_get_test_driver_name(); @@ -17209,14 +17653,6 @@ main(void) if (H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0) < 0) goto error; - /* Copy the file access property list */ - if ((fapl2 = H5Pcopy(fapl)) < 0) - TEST_ERROR; - - /* Set the "use the latest version of the format" bounds for creating objects in the file */ - if (H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) - TEST_ERROR; - /* create a file creation property list */ if ((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0) TEST_ERROR; @@ -17239,36 +17675,29 @@ main(void) for (minimized_ohdr = false; minimized_ohdr <= true; minimized_ohdr++) { - /* Test with old & new format groups */ - for (new_format = false; new_format <= true; new_format++) { - hid_t my_fapl, my_fcpl; + /* Test all different low bounds for file format */ + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { + hid_t my_fcpl; - /* Set the FAPL for the type of format */ - if (new_format) { - my_fapl = fapl2; - if (paged) { - my_fcpl = fcpl2; - puts("\nTesting with new file format and paged aggregation"); - } - else { - my_fcpl = fcpl; - puts("\nTesting with new file format and non-paged aggregation"); - } - } /* end if */ + /* Set version bounds */ + if (H5Pset_libver_bounds(fapl, low, H5F_LIBVER_LATEST) < 0) + TEST_ERROR; + + /* Print partial message about file format */ + printf("\nTesting with %s file format ", h5_get_version_string(low)); + + /* Set the FCPL and print the rest of the message depending on paged aggregation setting */ + if (paged) { + my_fcpl = fcpl2; + puts("and paged aggregation"); + } else { - my_fapl = fapl; - if (paged) { - my_fcpl = fcpl2; - puts("Testing with old file format and paged aggregation:"); - } - else { - my_fcpl = fcpl; - puts("Testing with old file format and non-paged aggregation:"); - } - } /* end else */ + my_fcpl = fcpl; + puts("and non-paged aggregation"); + } /* Create the file for this test */ - if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, my_fcpl, my_fapl)) < 0) + if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, my_fcpl, fapl)) < 0) goto error; if (true == minimized_ohdr) { @@ -17286,10 +17715,10 @@ main(void) goto error; nerrors += (test_create(file) < 0 ? 
1 : 0); - nerrors += (test_simple_io(driver_name, my_fapl) < 0 ? 1 : 0); - nerrors += (test_compact_io(my_fapl) < 0 ? 1 : 0); - nerrors += (test_max_compact(my_fapl) < 0 ? 1 : 0); - nerrors += (test_compact_open_close_dirty(my_fapl) < 0 ? 1 : 0); + nerrors += (test_simple_io(driver_name, fapl) < 0 ? 1 : 0); + nerrors += (test_compact_io(fapl) < 0 ? 1 : 0); + nerrors += (test_max_compact(fapl) < 0 ? 1 : 0); + nerrors += (test_compact_open_close_dirty(fapl) < 0 ? 1 : 0); nerrors += (test_conv_buffer(file) < 0 ? 1 : 0); nerrors += (test_tconv(file) < 0 ? 1 : 0); nerrors += (test_filters(file) < 0 ? 1 : 0); @@ -17312,7 +17741,7 @@ main(void) nerrors += (test_multiopen(file) < 0 ? 1 : 0); nerrors += (test_types(file) < 0 ? 1 : 0); nerrors += (test_floattypes(file) < 0 ? 1 : 0); - nerrors += (test_userblock_offset(driver_name, my_fapl, new_format) < 0 ? 1 : 0); + nerrors += (test_userblock_offset(driver_name, fapl, low >= H5F_LIBVER_V110) < 0 ? 1 : 0); if (driver_is_default_compatible) { nerrors += (test_missing_filter(file) < 0 ? 1 : 0); @@ -17321,10 +17750,10 @@ main(void) nerrors += (test_can_apply(file) < 0 ? 1 : 0); nerrors += (test_can_apply2(file) < 0 ? 1 : 0); nerrors += (test_optional_filters(file) < 0 ? 1 : 0); - nerrors += (test_set_local(my_fapl) < 0 ? 1 : 0); + nerrors += (test_set_local(fapl) < 0 ? 1 : 0); nerrors += (test_can_apply_szip(file) < 0 ? 1 : 0); nerrors += (test_compare_dcpl(file) < 0 ? 1 : 0); - nerrors += (test_copy_dcpl(file, my_fapl) < 0 ? 1 : 0); + nerrors += (test_copy_dcpl(file, fapl) < 0 ? 1 : 0); nerrors += (test_filter_delete(file) < 0 ? 1 : 0); if (driver_is_default_compatible) { @@ -17333,49 +17762,50 @@ main(void) nerrors += (test_zero_dims(file) < 0 ? 1 : 0); nerrors += (test_missing_chunk(file) < 0 ? 1 : 0); - nerrors += (test_random_chunks(my_fapl) < 0 ? 1 : 0); + nerrors += (test_random_chunks(fapl) < 0 ? 1 : 0); #ifndef H5_NO_DEPRECATED_SYMBOLS nerrors += (test_deprec(file) < 0 ? 1 : 0); #endif /* H5_NO_DEPRECATED_SYMBOLS */ - nerrors += (test_huge_chunks(my_fapl) < 0 ? 1 : 0); - nerrors += (test_chunk_cache(my_fapl) < 0 ? 1 : 0); - nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0); - nerrors += (test_chunk_fast(driver_name, my_fapl) < 0 ? 1 : 0); - nerrors += (test_reopen_chunk_fast(my_fapl) < 0 ? 1 : 0); - nerrors += (test_chunk_fast_bug1(my_fapl) < 0 ? 1 : 0); - nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0); - nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0); - nerrors += (test_fixed_array(my_fapl) < 0 ? 1 : 0); + nerrors += (test_huge_chunks(fapl) < 0 ? 1 : 0); + nerrors += (test_chunk_cache(fapl) < 0 ? 1 : 0); + nerrors += (test_big_chunks_bypass_cache(fapl) < 0 ? 1 : 0); + nerrors += (test_chunk_fast(driver_name, fapl) < 0 ? 1 : 0); + nerrors += (test_reopen_chunk_fast(fapl) < 0 ? 1 : 0); + nerrors += (test_chunk_fast_bug1(fapl) < 0 ? 1 : 0); + if (low >= H5F_LIBVER_V200) + nerrors += (test_chunk_expand2(fapl) < 0 ? 1 : 0); + else + nerrors += (test_chunk_expand(fapl) < 0 ? 1 : 0); + nerrors += (test_layout_extend(fapl) < 0 ? 1 : 0); + nerrors += (test_fixed_array(fapl) < 0 ? 1 : 0); if (driver_is_default_compatible) { nerrors += (test_idx_compatible() < 0 ? 1 : 0); } - nerrors += (test_unfiltered_edge_chunks(my_fapl) < 0 ? 1 : 0); - nerrors += (test_single_chunk(my_fapl) < 0 ? 1 : 0); - nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0); - nerrors += (test_zero_dim_dset(my_fapl) < 0 ? 1 : 0); - nerrors += (test_storage_size(my_fapl) < 0 ? 1 : 0); - nerrors += (test_power2up(my_fapl) < 0 ? 
1 : 0); + nerrors += (test_unfiltered_edge_chunks(fapl) < 0 ? 1 : 0); + nerrors += (test_single_chunk(fapl) < 0 ? 1 : 0); + nerrors += (test_large_chunk_shrink(fapl) < 0 ? 1 : 0); + nerrors += (test_zero_dim_dset(fapl) < 0 ? 1 : 0); + nerrors += (test_storage_size(fapl) < 0 ? 1 : 0); + nerrors += (test_power2up(fapl) < 0 ? 1 : 0); - nerrors += (test_swmr_non_latest(driver_name, my_fapl) < 0 ? 1 : 0); - nerrors += (test_earray_hdr_fd(driver_name, my_fapl) < 0 ? 1 : 0); - nerrors += (test_farray_hdr_fd(driver_name, my_fapl) < 0 ? 1 : 0); - nerrors += (test_bt2_hdr_fd(driver_name, my_fapl) < 0 ? 1 : 0); + nerrors += (test_swmr_non_latest(driver_name, fapl) < 0 ? 1 : 0); + nerrors += (test_earray_hdr_fd(driver_name, fapl) < 0 ? 1 : 0); + nerrors += (test_farray_hdr_fd(driver_name, fapl) < 0 ? 1 : 0); + nerrors += (test_bt2_hdr_fd(driver_name, fapl) < 0 ? 1 : 0); nerrors += (test_downsize_vlen_scalar_dataset(file) < 0 ? 1 : 0); if (H5Fclose(file) < 0) goto error; - } /* end for new_format */ + } /* end for low */ } /* end for minimized_ohdr */ } /* end for paged */ /* Close property lists */ - if (H5Pclose(fapl2) < 0) - TEST_ERROR; if (H5Pclose(fcpl) < 0) TEST_ERROR; if (H5Pclose(fcpl2) < 0)
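/*
 * Sketch (hypothetical helper and file name, not part of this patch): how a
 * caller opts into the 2.0+ file format bounds before creating a file. The
 * driver loop above relies on the same setting when it routes to
 * test_chunk_expand2() only for low >= H5F_LIBVER_V200, since that test
 * expects filters to be able to expand chunks beyond what the old format
 * allowed.
 */
#include "hdf5.h"

static hid_t
create_v200_file(const char *name)
{
    hid_t fapl = H5I_INVALID_HID;
    hid_t fid  = H5I_INVALID_HID;

    if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return H5I_INVALID_HID;

    /* A lower bound of 2.0 lets the library use the newest on-disk structures */
    if (H5Pset_libver_bounds(fapl, H5F_LIBVER_V200, H5F_LIBVER_LATEST) < 0) {
        H5Pclose(fapl);
        return H5I_INVALID_HID;
    }

    fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    H5Pclose(fapl);
    return fid;
}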