From a187efb4c2dd16eca1c31da2deb1c1db86ed707b Mon Sep 17 00:00:00 2001
From: Ryan Roelke
Date: Thu, 5 Jun 2025 11:27:30 -0400
Subject: [PATCH 01/53] Domain option version of get_dimension_index

---
 tiledb/sm/array_schema/domain.cc | 15 ++++++++++++---
 tiledb/sm/array_schema/domain.h  |  7 +++++++
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/tiledb/sm/array_schema/domain.cc b/tiledb/sm/array_schema/domain.cc
index bef2c6b8ad9..9c7b39ed390 100644
--- a/tiledb/sm/array_schema/domain.cc
+++ b/tiledb/sm/array_schema/domain.cc
@@ -491,15 +491,24 @@ bool Domain::has_dimension(const std::string& name) const {
   return false;
 }
 
-unsigned Domain::get_dimension_index(const std::string& name) const {
+std::optional<unsigned> Domain::dimension_index(const std::string& name) const {
   for (unsigned d = 0; d < dim_num_; ++d) {
     if (dimension_ptrs_[d]->name() == name) {
       return d;
     }
   }
 
-  throw std::invalid_argument(
-      "Cannot get dimension index; Invalid dimension name");
+  return std::nullopt;
+}
+
+unsigned Domain::get_dimension_index(const std::string& name) const {
+  const auto maybe = dimension_index(name);
+  if (maybe.has_value()) {
+    return maybe.value();
+  } else {
+    throw std::invalid_argument(
+        "Cannot get dimension index; Invalid dimension name");
+  }
 }
 
 bool Domain::null_tile_extents() const {
diff --git a/tiledb/sm/array_schema/domain.h b/tiledb/sm/array_schema/domain.h
index fa6a1a7188a..d2563c715d3 100644
--- a/tiledb/sm/array_schema/domain.h
+++ b/tiledb/sm/array_schema/domain.h
@@ -394,9 +394,16 @@ class Domain {
    *
    * @param name Name of dimension to check for
    * @return Dimension index
+   * @throws if the name is not a dimension
    */
   unsigned get_dimension_index(const std::string& name) const;
 
+  /**
+   * @return the index in the domain of the requested dimension, or
+   *   `std::nullopt` if not found
+   */
+  std::optional<unsigned> dimension_index(const std::string& name) const;
+
   /** Returns true if at least one dimension has null tile extent.
*/ bool null_tile_extents() const; From cb22d35dff7801f764f2d2b5d893d7dc5459365d Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Thu, 5 Jun 2025 16:28:57 -0400 Subject: [PATCH 02/53] Dimension::cell_size --- tiledb/sm/array_schema/dimension.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tiledb/sm/array_schema/dimension.h b/tiledb/sm/array_schema/dimension.h index 3f9f62be1f8..54e23a70f10 100644 --- a/tiledb/sm/array_schema/dimension.h +++ b/tiledb/sm/array_schema/dimension.h @@ -743,6 +743,17 @@ class Dimension { return cell_val_num_ == constants::var_num; } + /** + * @return the size in bytes of one cell for this dimension, or + * `constants::var_size` if variable-length + */ + uint64_t cell_size() const { + if (var_size()) { + return constants::var_size; + } + return cell_val_num_ * datatype_size(type_); + } + class DimensionDispatch { public: DimensionDispatch(const Dimension& base) From 760f643accd8df916d9820647b2fd84fffb07862 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Thu, 5 Jun 2025 16:31:10 -0400 Subject: [PATCH 03/53] Add tile global/order min/max, bump format version, not tested --- tiledb/sm/fragment/fragment_metadata.cc | 246 ++++++++++++++++-- tiledb/sm/fragment/fragment_metadata.h | 53 ++++ tiledb/sm/fragment/loaded_fragment_metadata.h | 66 +++++ tiledb/sm/misc/constants.cc | 6 +- tiledb/sm/misc/constants.h | 4 + tiledb/sm/query/writers/writer_base.cc | 14 + 6 files changed, 362 insertions(+), 27 deletions(-) diff --git a/tiledb/sm/fragment/fragment_metadata.cc b/tiledb/sm/fragment/fragment_metadata.cc index e19f7a0097a..32081e98757 100644 --- a/tiledb/sm/fragment/fragment_metadata.cc +++ b/tiledb/sm/fragment/fragment_metadata.cc @@ -57,6 +57,7 @@ #include "tiledb/sm/tile/generic_tile_io.h" #include "tiledb/sm/tile/tile.h" #include "tiledb/sm/tile/tile_metadata_generator.h" +#include "tiledb/sm/tile/writer_tile_tuple.h" #include "tiledb/storage_format/serialization/serializers.h" #include "tiledb/type/range/range.h" @@ -296,6 +297,28 @@ void FragmentMetadata::set_tile_max_var( } } +/** + * Converts a buffer of `uint64_t` variable-length data sizes into a buffer + * of `uint64_t` variable-length data offsets, in place. + * + * @return the total size of variable-length data + */ +static uint64_t convert_sizes_to_offsets( + uint64_t cell_start, uint64_t acc, std::span raw_offsets) { + const uint64_t num_offsets = raw_offsets.size() / sizeof(uint64_t); + uint64_t* offsets = + &reinterpret_cast(raw_offsets.data())[cell_start]; + + for (uint64_t i = cell_start; i < num_offsets; i++) { + auto size = *offsets; + *offsets = acc; + ++offsets; + acc += size; + } + + return acc; +} + void FragmentMetadata::convert_tile_min_max_var_sizes_to_offsets( const std::string& name) { auto it = idx_map_.find(name); @@ -303,39 +326,127 @@ void FragmentMetadata::convert_tile_min_max_var_sizes_to_offsets( auto idx = it->second; // Fix the min offsets. - uint64_t offset = loaded_metadata_ptr_->tile_min_var_buffer()[idx].size(); - auto offsets = - (uint64_t*)loaded_metadata_ptr_->tile_min_buffer()[idx].data() + - tile_index_base_; - for (uint64_t i = tile_index_base_; - i < - loaded_metadata_ptr_->tile_min_buffer()[idx].size() / sizeof(uint64_t); - i++) { - auto size = *offsets; - *offsets = offset; - offsets++; - offset += size; - } + const uint64_t min_var_total = convert_sizes_to_offsets( + tile_index_base_, + loaded_metadata_ptr_->tile_min_var_buffer()[idx].size(), + loaded_metadata_ptr_->tile_min_buffer()[idx]); // Allocate min var data buffer. 
- loaded_metadata_ptr_->tile_min_var_buffer()[idx].resize(offset); + loaded_metadata_ptr_->tile_min_var_buffer()[idx].resize(min_var_total); // Fix the max offsets. - offset = loaded_metadata_ptr_->tile_max_var_buffer()[idx].size(); - offsets = (uint64_t*)loaded_metadata_ptr_->tile_max_buffer()[idx].data() + - tile_index_base_; - for (uint64_t i = tile_index_base_; - i < - loaded_metadata_ptr_->tile_max_buffer()[idx].size() / sizeof(uint64_t); - i++) { - auto size = *offsets; - *offsets = offset; - offsets++; - offset += size; + const uint64_t max_var_total = convert_sizes_to_offsets( + tile_index_base_, + loaded_metadata_ptr_->tile_max_var_buffer()[idx].size(), + loaded_metadata_ptr_->tile_max_buffer()[idx]); + + // Allocate max var data buffer. + loaded_metadata_ptr_->tile_max_var_buffer()[idx].resize(max_var_total); +} + +void FragmentMetadata::set_tile_global_order_bounds_fixed( + const std::string& dim_name, uint64_t tile, const WriterTileTuple& data) { + iassert(data.cell_num() > 0); + + const auto dim = array_schema_->domain().get_dimension_index(dim_name); + + if (array_schema_->domain().dimensions()[dim]->var_size()) { + const uint64_t* source_offsets = data.offset_tile().data_as(); + + // NB: for now we set a length, and it will be updated to an offset + // via `convert_tile_global_order_bounds_sizes_to_offsets`, + // and then the var data will be written after that + uint64_t* min_sizes = reinterpret_cast( + loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data()); + uint64_t* max_sizes = reinterpret_cast( + loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data()); + + const uint64_t fixed_offset = tile / sizeof(uint64_t); + max_sizes[fixed_offset] = + data.var_tile().size() - source_offsets[data.cell_num() - 1]; + if (data.cell_num() == 1) { + min_sizes[fixed_offset] = max_sizes[fixed_offset]; + } else { + min_sizes[fixed_offset] = source_offsets[1] - source_offsets[0]; + } + } else { + const uint64_t fixed_size = + array_schema_->domain().dimensions()[dim]->cell_size(); + const uint8_t* fixed_data = data.fixed_tile().data_as(); + + void* min_data = + loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data(); + memcpy(min_data, &fixed_data[0], fixed_size); + + const uint64_t max_start = + data.fixed_tile() + .data_as()[fixed_size * (data.cell_num() - 1)]; + void* max_data = + loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data(); + memcpy(max_data, &fixed_data[max_start], fixed_size); + } +} + +void FragmentMetadata::set_tile_global_order_bounds_var( + const std::string& dim_name, uint64_t tile, const WriterTileTuple& data) { + const auto dim = array_schema_->domain().get_dimension_index(dim_name); + if (!array_schema_->domain().dimensions()[dim]->var_size()) { + return; } + iassert(data.cell_num() > 0); + + const uint64_t* min_sizes = reinterpret_cast( + loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data()); + const uint64_t* max_sizes = reinterpret_cast( + loaded_metadata_ptr_->tile_global_order_max_buffer()[dim].data()); + + const uint64_t* data_offsets = data.offset_tile().data_as(); + const uint64_t min_var_start = data_offsets[0]; + const uint64_t min_var_size = min_sizes[0]; + const uint64_t max_var_start = data_offsets[data.cell_num() - 1]; + const uint64_t max_var_size = max_sizes[data.cell_num() - 1]; + + if (min_var_size) { + memcpy( + &loaded_metadata_ptr_ + ->tile_global_order_min_var_buffer()[tile][min_var_start], + data.var_tile().data_as(), + min_var_size); + } + if (max_var_size) { + memcpy( + 
&loaded_metadata_ptr_ + ->tile_global_order_max_var_buffer()[tile][max_var_start], + data.var_tile().data_as(), + max_var_size); + } +} + +void FragmentMetadata::convert_tile_global_order_bounds_sizes_to_offsets( + const std::string& dim_name) { + // NB: this is dimensions only, so do not use `idx_map_` + const auto idx = array_schema_->domain().get_dimension_index(dim_name); + + // Fix the min offsets. + const uint64_t min_var_total = convert_sizes_to_offsets( + tile_index_base_, + loaded_metadata_ptr_->tile_global_order_min_var_buffer()[idx].size(), + loaded_metadata_ptr_->tile_global_order_min_buffer()[idx]); + // Allocate min var data buffer. - loaded_metadata_ptr_->tile_max_var_buffer()[idx].resize(offset); + loaded_metadata_ptr_->tile_global_order_min_var_buffer()[idx].resize( + min_var_total); + + // Fix the max offsets. + const uint64_t max_var_total = convert_sizes_to_offsets( + tile_index_base_, + loaded_metadata_ptr_->tile_global_order_max_var_buffer()[idx].size(), + loaded_metadata_ptr_->tile_global_order_max_buffer()[idx]); + + // Allocate max var data buffer. + loaded_metadata_ptr_->tile_global_order_max_var_buffer()[idx].resize( + max_var_total); } void FragmentMetadata::set_tile_sum( @@ -1160,6 +1271,25 @@ void FragmentMetadata::store_v15_or_higher( offset += nbytes; } + if (version_ >= constants::fragment_metadata_global_order_bounds_version) { + const auto num_dims = array_schema_->dim_num(); + // Store global order mins + gt_offsets_.tile_global_order_min_offsets_.resize(num_dims); + for (unsigned i = 0; i < num_dims; ++i) { + gt_offsets_.tile_global_order_min_offsets_[i] = offset; + store_tile_global_order_mins(i, encryption_key, &nbytes); + offset += nbytes; + } + + // Store global order maxs + gt_offsets_.tile_global_order_max_offsets_.resize(num_dims); + for (unsigned i = 0; i < num_dims; ++i) { + gt_offsets_.tile_global_order_max_offsets_[i] = offset; + store_tile_global_order_mins(i, encryption_key, &nbytes); + offset += nbytes; + } + } + // Store sums gt_offsets_.tile_sum_offsets_.resize(num); for (unsigned int i = 0; i < num; ++i) { @@ -2991,6 +3121,70 @@ void FragmentMetadata::write_tile_maxs(unsigned idx, Serializer& serializer) { } } +void FragmentMetadata::store_tile_global_order_mins( + unsigned dim, const EncryptionKey& encryption_key, uint64_t* nbytes) { + SizeComputationSerializer size_computation_serializer; + write_tile_global_order_mins(dim, size_computation_serializer); + + auto tile{WriterTile::from_generic( + size_computation_serializer.size(), memory_tracker_)}; + + Serializer serializer(tile->data(), tile->size()); + write_tile_global_order_mins(dim, serializer); + write_generic_tile_to_file(encryption_key, tile, nbytes); + + resources_->stats().add_counter("write_global_order_mins_size", *nbytes); +} + +void FragmentMetadata::write_tile_global_order_mins( + unsigned dim, Serializer& serializer) { + const auto& fixedPart = + loaded_metadata_ptr_->tile_global_order_min_buffer()[dim]; + const auto& varPart = + loaded_metadata_ptr_->tile_global_order_min_var_buffer()[dim]; + serializer.write(fixedPart.size()); + serializer.write(varPart.size()); + + if (!fixedPart.empty()) { + serializer.write(fixedPart.data(), fixedPart.size()); + } + if (!varPart.empty()) { + serializer.write(varPart.data(), varPart.size()); + } +} + +void FragmentMetadata::store_tile_global_order_maxs( + unsigned dim, const EncryptionKey& encryption_key, uint64_t* nbytes) { + SizeComputationSerializer size_computation_serializer; + write_tile_global_order_maxs(dim, 
size_computation_serializer); + + auto tile{WriterTile::from_generic( + size_computation_serializer.size(), memory_tracker_)}; + + Serializer serializer(tile->data(), tile->size()); + write_tile_global_order_maxs(dim, serializer); + write_generic_tile_to_file(encryption_key, tile, nbytes); + + resources_->stats().add_counter("write_global_order_maxs_size", *nbytes); +} + +void FragmentMetadata::write_tile_global_order_maxs( + unsigned dim, Serializer& serializer) { + const auto& fixedPart = + loaded_metadata_ptr_->tile_global_order_max_buffer()[dim]; + const auto& varPart = + loaded_metadata_ptr_->tile_global_order_max_var_buffer()[dim]; + serializer.write(fixedPart.size()); + serializer.write(varPart.size()); + + if (!fixedPart.empty()) { + serializer.write(fixedPart.data(), fixedPart.size()); + } + if (!varPart.empty()) { + serializer.write(varPart.data(), varPart.size()); + } +} + void FragmentMetadata::store_tile_sums( unsigned idx, const EncryptionKey& encryption_key, uint64_t* nbytes) { SizeComputationSerializer size_computation_serializer; diff --git a/tiledb/sm/fragment/fragment_metadata.h b/tiledb/sm/fragment/fragment_metadata.h index 931d6d0099f..82034d54043 100644 --- a/tiledb/sm/fragment/fragment_metadata.h +++ b/tiledb/sm/fragment/fragment_metadata.h @@ -63,6 +63,7 @@ class Buffer; class EncryptionKey; class TileMetadata; class MemoryTracker; +class WriterTileTuple; class FragmentMetadataStatusException : public StatusException { public: @@ -139,6 +140,8 @@ class FragmentMetadata { std::vector tile_validity_offsets_; std::vector tile_min_offsets_; std::vector tile_max_offsets_; + std::vector tile_global_order_min_offsets_; // per dimension only + std::vector tile_global_order_max_offsets_; // per dimension only std::vector tile_sum_offsets_; std::vector tile_null_count_offsets_; uint64_t fragment_min_max_sum_null_count_offset_; @@ -587,6 +590,26 @@ class FragmentMetadata { */ void convert_tile_min_max_var_sizes_to_offsets(const std::string& name); + /** + * Populate fixed parts of bounds for a dimension and tile using the + * coordinate values in `data`. + */ + void set_tile_global_order_bounds_fixed( + const std::string& dim_name, uint64_t tile, const WriterTileTuple& data); + + /** + * Populate var parts of bounds for a dimension and tile using the coordinate + * values in `data`. + */ + void set_tile_global_order_bounds_var( + const std::string& dim_name, uint64_t tile, const WriterTileTuple& data); + + /** + * Converts global order min/max sizes to offsets. + */ + void convert_tile_global_order_bounds_sizes_to_offsets( + const std::string& dim_name); + /** * Sets a tile sum for the input attribute. * @@ -1303,6 +1326,36 @@ class FragmentMetadata { */ void write_tile_maxs(unsigned idx, Serializer& serializer); + /** + * Writes the global order minimum of each tile to storage. + * + * @param dim The index of the dimension + * @param encryption_key The encryption key + * @param nbytes[in/out] The total number of bytes written + */ + void store_tile_global_order_mins( + unsigned dim, const EncryptionKey& encryption_key, uint64_t* nbytes); + + /** + * Writes the global order minimum of each tile to the input buffer. + */ + void write_tile_global_order_mins(unsigned dim, Serializer& serializer); + + /** + * Writes the global order maximum of each tile to storage. 
+ * + * @param dim The index of the dimension + * @param encryption_key The encryption key + * @param nbytes[in/out] The total number of bytes written + */ + void store_tile_global_order_maxs( + unsigned dim, const EncryptionKey& encryption_key, uint64_t* nbytes); + + /** + * Writes the global order maximum of each tile to the input buffer. + */ + void write_tile_global_order_maxs(unsigned dim, Serializer& serializer); + /** * Writes the sums of the input attribute to storage. * diff --git a/tiledb/sm/fragment/loaded_fragment_metadata.h b/tiledb/sm/fragment/loaded_fragment_metadata.h index cd34362d73b..0707441fb73 100644 --- a/tiledb/sm/fragment/loaded_fragment_metadata.h +++ b/tiledb/sm/fragment/loaded_fragment_metadata.h @@ -456,6 +456,38 @@ class LoadedFragmentMetadata { return tile_max_var_buffer_; } + inline const auto& tile_global_order_min_buffer() const { + return tile_global_order_min_buffer_; + } + + inline const auto& tile_global_order_min_var_buffer() const { + return tile_global_order_min_var_buffer_; + } + + inline const auto& tile_global_order_max_buffer() const { + return tile_global_order_max_buffer_; + } + + inline const auto& tile_global_order_max_var_buffer() const { + return tile_global_order_max_var_buffer_; + } + + inline auto& tile_global_order_min_buffer() { + return tile_global_order_min_buffer_; + } + + inline auto& tile_global_order_min_var_buffer() { + return tile_global_order_min_var_buffer_; + } + + inline auto& tile_global_order_max_buffer() { + return tile_global_order_max_buffer_; + } + + inline auto& tile_global_order_max_var_buffer() { + return tile_global_order_max_var_buffer_; + } + /** Returns an RTree for the MBRs. */ inline const RTree& rtree() const { return rtree_; @@ -655,6 +687,40 @@ class LoadedFragmentMetadata { */ tdb::pmr::vector> tile_max_var_buffer_; + /** + * The tile global order minima. + * + * The outer vector is indexed by dimension `d`. + * The inner vector is the concatenated values of the `d`th + * dimension of the global order minimum coordinate for each tile. + * + * For variable-length dimensions the value stored here is the offset + * into the corresponding variable-length buffer. + */ + tdb::pmr::vector> tile_global_order_min_buffer_; + + /** + * The tile global order maxima. + * + * The outer vector is indexed by dimension `d`. + * The inner vector is the concatenated values of the `d`th + * dimension of the global order maximum coordinate for each tile. + * + * For variable-length dimensions the value stored here is the offset + * into the corresponding variable-length buffer. + */ + tdb::pmr::vector> tile_global_order_max_buffer_; + + /** + * The tile global order minima, variable-length part. + */ + tdb::pmr::vector> tile_global_order_min_var_buffer_; + + /** + * The tile global order maxima, variable-length part. + */ + tdb::pmr::vector> tile_global_order_max_var_buffer_; + /** * The tile sum values, ignored for var sized attributes/dimensions. */ diff --git a/tiledb/sm/misc/constants.cc b/tiledb/sm/misc/constants.cc index 18c20eacdd0..44720f33962 100644 --- a/tiledb/sm/misc/constants.cc +++ b/tiledb/sm/misc/constants.cc @@ -690,7 +690,7 @@ const int32_t library_version[3] = { TILEDB_VERSION_MAJOR, TILEDB_VERSION_MINOR, TILEDB_VERSION_PATCH}; /** The TileDB serialization base format version number. */ -const format_version_t format_version = 22; +const format_version_t format_version = 23; /** The lowest version supported for back compat writes. 
*/ const format_version_t back_compat_writes_min_format_version = 7; @@ -722,6 +722,10 @@ const format_version_t current_domain_version = 0; /** The NDRectangle current domain */ const std::string current_domain_ndrectangle_str = "NDRECTANGLE"; +/** The lowest version where fragment metadata contains per-tile global order + * bounds */ +const format_version_t fragment_metadata_global_order_bounds_version = 23; + /** The maximum size of a tile chunk (unit of compression) in bytes. */ const uint64_t max_tile_chunk_size = 64 * 1024; diff --git a/tiledb/sm/misc/constants.h b/tiledb/sm/misc/constants.h index 6db29c26702..9e6561894f5 100644 --- a/tiledb/sm/misc/constants.h +++ b/tiledb/sm/misc/constants.h @@ -717,6 +717,10 @@ extern const format_version_t current_domain_version; /** The NDRectangle current_domain */ extern const std::string current_domain_ndrectangle_str; +/** The lowest version where fragment metadata contains per-tile global order + * bounds */ +extern const format_version_t fragment_metadata_global_order_bounds_version; + /** The maximum size of a tile chunk (unit of compression) in bytes. */ extern const uint64_t max_tile_chunk_size; diff --git a/tiledb/sm/query/writers/writer_base.cc b/tiledb/sm/query/writers/writer_base.cc index 4962018733e..8857f996250 100644 --- a/tiledb/sm/query/writers/writer_base.cc +++ b/tiledb/sm/query/writers/writer_base.cc @@ -1058,6 +1058,14 @@ Status WriterBase::write_tiles( attr, idx - start_tile_idx, tiles[idx].max()); } } + + if (array_schema_.is_dim(attr)) { + frag_meta->convert_tile_global_order_bounds_sizes_to_offsets(attr); + for (uint64_t idx = start_tile_idx; idx < end_tile_idx; idx++) { + frag_meta->set_tile_global_order_bounds_var( + attr, idx - start_tile_idx, tiles[idx]); + } + } return Status::Ok(); })); } @@ -1127,11 +1135,17 @@ Status WriterBase::write_tiles( frag_meta->set_tile_min_var_size(name, tile_id, tile.min().size()); frag_meta->set_tile_max_var_size(name, tile_id, tile.max().size()); } + if (array_schema_.is_dim(name)) { + frag_meta->set_tile_global_order_bounds_fixed(name, tile_id, tile); + } } else { if (has_min_max_md && null_count != frag_meta->cell_num(tile_id)) { frag_meta->set_tile_min(name, tile_id, tile.min()); frag_meta->set_tile_max(name, tile_id, tile.max()); } + if (array_schema_.is_dim(name)) { + frag_meta->set_tile_global_order_bounds_fixed(name, tile_id, tile); + } if (has_sum_md) { frag_meta->set_tile_sum(name, tile_id, tile.sum()); From fb387bd6381522aeccc86313b05bc51846034025 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Mon, 21 Jul 2025 16:29:45 -0400 Subject: [PATCH 04/53] Stubs for C API retrieving tile global order bounds --- .../c_api/fragment_info/fragment_info_api.cc | 56 +++++++++++++ .../fragment_info_api_external.h | 78 +++++++++++++++++++ .../fragment_info_api_internal.h | 18 +++++ tiledb/sm/fragment/fragment_info.cc | 18 +++++ tiledb/sm/fragment/fragment_info.h | 22 ++++++ 5 files changed, 192 insertions(+) diff --git a/tiledb/api/c_api/fragment_info/fragment_info_api.cc b/tiledb/api/c_api/fragment_info/fragment_info_api.cc index 4a66a5e869c..43af7244567 100644 --- a/tiledb/api/c_api/fragment_info/fragment_info_api.cc +++ b/tiledb/api/c_api/fragment_info/fragment_info_api.cc @@ -316,6 +316,36 @@ capi_return_t tiledb_fragment_info_get_mbr_var_from_name( return TILEDB_OK; } +capi_return_t tiledb_fragment_info_get_global_order_lower_bound( + tiledb_fragment_info_t* fragment_info, + uint32_t fragment_id, + uint32_t mbr_id, + size_t* dimension_sizes, + void** dimensions) TILEDB_NOEXCEPT { + 
ensure_fragment_info_is_valid(fragment_info); + ensure_output_pointer_is_valid(dimensions); + + throw_if_not_ok(fragment_info->get_global_order_upper_bound( + fragment_id, mbr_id, dimension_sizes, dimensions)); + + return TILEDB_OK; +} + +capi_return_t tiledb_fragment_info_get_global_order_upper_bound( + tiledb_fragment_info_t* fragment_info, + uint32_t fragment_id, + uint32_t mbr_id, + size_t* dimension_sizes, + void** dimensions) TILEDB_NOEXCEPT { + ensure_fragment_info_is_valid(fragment_info); + ensure_output_pointer_is_valid(dimensions); + + throw_if_not_ok(fragment_info->get_global_order_upper_bound( + fragment_id, mbr_id, dimension_sizes, dimensions)); + + return TILEDB_OK; +} + capi_return_t tiledb_fragment_info_get_cell_num( tiledb_fragment_info_t* fragment_info, uint32_t fid, uint64_t* cell_num) { ensure_fragment_info_is_valid(fragment_info); @@ -709,6 +739,32 @@ CAPI_INTERFACE( ctx, fragment_info, fid, mid, dim_name, start, end); } +CAPI_INTERFACE( + fragment_info_get_global_order_lower_bound, + tiledb_ctx_t* ctx, + tiledb_fragment_info_t* fragment_info, + uint32_t fragment_id, + uint32_t mbr_id, + size_t* dimension_sizes, + void** dimensions) { + return api_entry_context< + tiledb::api::tiledb_fragment_info_get_global_order_lower_bound>( + ctx, fragment_info, fragment_id, mbr_id, dimension_sizes, dimensions); +} + +CAPI_INTERFACE( + fragment_info_get_global_order_upper_bound, + tiledb_ctx_t* ctx, + tiledb_fragment_info_t* fragment_info, + uint32_t fragment_id, + uint32_t mbr_id, + size_t* dimension_sizes, + void** dimensions) { + return api_entry_context< + tiledb::api::tiledb_fragment_info_get_global_order_upper_bound>( + ctx, fragment_info, fragment_id, mbr_id, dimension_sizes, dimensions); +} + CAPI_INTERFACE( fragment_info_get_cell_num, tiledb_ctx_t* ctx, diff --git a/tiledb/api/c_api/fragment_info/fragment_info_api_external.h b/tiledb/api/c_api/fragment_info/fragment_info_api_external.h index d08774a5712..548d45d08fe 100644 --- a/tiledb/api/c_api/fragment_info/fragment_info_api_external.h +++ b/tiledb/api/c_api/fragment_info/fragment_info_api_external.h @@ -713,6 +713,84 @@ TILEDB_EXPORT capi_return_t tiledb_fragment_info_get_mbr_var_from_name( void* start, void* end) TILEDB_NOEXCEPT; +/** + * Retrieves the minimum coordinate in global order for a + * particular bounding rectangle in a fragment. + * + * **Example:** + * + * @code{.c} + * int32_t cell_id; + * size_t gene_name_size; + * char gene_name[MAX_GENE_NAME_LEN]; + * + * size_t dimension_sizes[] = { nullptr, &gene_name_size }; + * void *dimensions[] = { &cell_id, &gene_name[0] }; + * + * tiledb_fragment_info_get_global_order_lower_bound(ctx, + * fragment_info, 0, 0, + * &dimension_sizes[0], + * &dimensions[0]); + * @endcode + * + * @param[in] ctx The TileDB context + * @param[in] fragment_info The fragment info object. 
+ * @param[in] fragment_id The index of the fragment of interest + * @param[in] mbr_id The mbr of the fragment of interest + * @param[in/out] dimension_sizes An array of pointers to sizes to store the + * length of the value of each variable-length dimension + * @param[in/out] dimensions An array of pointers to buffers to store the value + * from each dimension + * @return TILEDB_OK if successful, TILEDB_ERR if an error occurs or if the + * fragment version is prior + */ +TILEDB_EXPORT capi_return_t tiledb_fragment_info_get_global_order_lower_bound( + tiledb_ctx_t* ctx, + tiledb_fragment_info_t* fragment_info, + uint32_t fragment_id, + uint32_t mbr_id, + size_t* dimension_sizes, + void** dimensions) TILEDB_NOEXCEPT; + +/** + * Retrieves the maximum coordinate in global order for a + * particular bounding rectangle in a fragment. + * + * **Example:** + * + * @code{.c} + * int32_t cell_id; + * size_t gene_name_size; + * char gene_name[MAX_GENE_NAME_LEN]; + * + * size_t dimension_sizes[] = { nullptr, &gene_name_size }; + * void *dimensions[] = { &cell_id, &gene_name[0] }; + * + * tiledb_fragment_info_get_global_order_upper_bound(ctx, + * fragment_info, 0, 0, + * &dimension_sizes[0], + * &dimensions[0]); + * @endcode + * + * @param[in] ctx The TileDB context + * @param[in] fragment_info The fragment info object. + * @param[in] fragment_id The index of the fragment of interest + * @param[in] mbr_id The mbr of the fragment of interest + * @param[in/out] dimension_sizes An array of pointers to sizes to store the + * length of the value of each variable-length dimension + * @param[in/out] dimensions An array of pointers to buffers to store the value + * from each dimension + * @return TILEDB_OK if successful, TILEDB_ERR if an error occurs or if the + * fragment version is prior + */ +TILEDB_EXPORT capi_return_t tiledb_fragment_info_get_global_order_upper_bound( + tiledb_ctx_t* ctx, + tiledb_fragment_info_t* fragment_info, + uint32_t fragment_id, + uint32_t mbr_id, + size_t* dimension_sizes, + void** dimensions) TILEDB_NOEXCEPT; + /** * Retrieves the number of cells written to the fragment by the user. 
* diff --git a/tiledb/api/c_api/fragment_info/fragment_info_api_internal.h b/tiledb/api/c_api/fragment_info/fragment_info_api_internal.h index 1140cb33a72..c4e02d5467c 100644 --- a/tiledb/api/c_api/fragment_info/fragment_info_api_internal.h +++ b/tiledb/api/c_api/fragment_info/fragment_info_api_internal.h @@ -151,6 +151,24 @@ struct tiledb_fragment_info_handle_t fid, mid, dim_name, start_size, end_size); } + Status get_global_order_lower_bound( + uint32_t fid, + uint32_t mid, + size_t* dimension_sizes, + void** dimension_ptrs) { + return fragment_info_->get_global_order_lower_bound( + fid, mid, dimension_sizes, dimension_ptrs); + } + + Status get_global_order_upper_bound( + uint32_t fid, + uint32_t mid, + size_t* dimension_sizes, + void** dimension_ptrs) { + return fragment_info_->get_global_order_upper_bound( + fid, mid, dimension_sizes, dimension_ptrs); + } + Status get_non_empty_domain(uint32_t fid, uint32_t did, void* domain) const { return fragment_info_->get_non_empty_domain(fid, did, domain); } diff --git a/tiledb/sm/fragment/fragment_info.cc b/tiledb/sm/fragment/fragment_info.cc index 87c3c7a76e2..2d49a7bea49 100644 --- a/tiledb/sm/fragment/fragment_info.cc +++ b/tiledb/sm/fragment/fragment_info.cc @@ -722,6 +722,24 @@ Status FragmentInfo::get_mbr_var( return get_mbr_var(fid, mid, did, start, end); } +Status FragmentInfo::get_global_order_lower_bound( + uint32_t fid, uint32_t mid, size_t* dimension_sizes, void** dimensions) { + (void)fid; + (void)mid; + (void)dimension_sizes; + (void)dimensions; + throw FragmentInfoException("TODO"); +} + +Status FragmentInfo::get_global_order_upper_bound( + uint32_t fid, uint32_t mid, size_t* dimension_sizes, void** dimensions) { + (void)fid; + (void)mid; + (void)dimension_sizes; + (void)dimensions; + throw FragmentInfoException("TODO"); +} + Status FragmentInfo::get_version(uint32_t fid, uint32_t* version) const { ensure_loaded(); if (version == nullptr) { diff --git a/tiledb/sm/fragment/fragment_info.h b/tiledb/sm/fragment/fragment_info.h index 2dee1a06e48..dd23baa4124 100644 --- a/tiledb/sm/fragment/fragment_info.h +++ b/tiledb/sm/fragment/fragment_info.h @@ -215,6 +215,28 @@ class FragmentInfo { Status get_mbr_var( uint32_t fid, uint32_t mid, const char* dim_name, void* start, void* end); + /** + * Retrieves a global order lower bound of the fragment with the given index + * for the tile indexed by `mid`. + * + * For fragments of version 23 and newer this is precise. + * Otherwise the minimum bounding rectangle is used as an imprecise lower + * bound. + */ + Status get_global_order_lower_bound( + uint32_t fid, uint32_t mid, size_t* dimension_sizes, void** dimensions); + + /** + * Retrieves a global order upper bound of the fragment with the given index + * for the tile indexed by `mid`. + * + * For fragments of version 23 and newer this is precise. + * Otherwise the minimum bounding rectangle is used as an imprecise upper + * bound. + */ + Status get_global_order_upper_bound( + uint32_t fid, uint32_t mid, size_t* dimension_sizes, void** dimensions); + /** Retrieves the version of the fragment with the given index. 
*/ Status get_version(uint32_t fid, uint32_t* version) const; From d3d99369c0715c1aae16184bd661ffadfdfc0148 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 5 Aug 2025 11:52:37 -0400 Subject: [PATCH 05/53] Fixed-length simple test passes --- test/src/unit-capi-fragment_info.cc | 120 ++++ .../c_api/fragment_info/fragment_info_api.cc | 6 +- tiledb/common/memory_tracker.cc | 4 + tiledb/common/memory_tracker.h | 2 + tiledb/sm/fragment/fragment_info.cc | 115 +++- tiledb/sm/fragment/fragment_metadata.cc | 92 ++- .../sm/fragment/loaded_fragment_metadata.cc | 14 + tiledb/sm/fragment/loaded_fragment_metadata.h | 28 + .../sm/fragment/ondemand_fragment_metadata.cc | 194 ++++-- .../sm/fragment/ondemand_fragment_metadata.h | 34 + .../v1v2preloaded_fragment_metadata.cc | 18 + .../v1v2preloaded_fragment_metadata.h | 32 + tiledb/sm/query/writers/writer_base.cc | 4 +- tiledb/sm/serialization/fragment_metadata.cc | 222 +++--- tiledb/sm/serialization/tiledb-rest.capnp | 16 + tiledb/sm/serialization/tiledb-rest.capnp.c++ | 478 ++++++++----- tiledb/sm/serialization/tiledb-rest.capnp.h | 640 +++++++++++++++++- tiledb/sm/tile/tile_metadata_generator.cc | 43 +- tiledb/sm/tile/tile_metadata_generator.h | 17 + tiledb/sm/tile/writer_tile_tuple.cc | 14 + tiledb/sm/tile/writer_tile_tuple.h | 34 + 21 files changed, 1790 insertions(+), 337 deletions(-) diff --git a/test/src/unit-capi-fragment_info.cc b/test/src/unit-capi-fragment_info.cc index 2c12d6eefe7..a5a378df446 100644 --- a/test/src/unit-capi-fragment_info.cc +++ b/test/src/unit-capi-fragment_info.cc @@ -30,6 +30,7 @@ * Tests the C API functions for manipulating fragment information. */ +#include "test/support/src/error_helpers.h" #include "test/support/src/helpers.h" #include "test/support/src/serialization_wrappers.h" #include "tiledb/sm/c_api/tiledb.h" @@ -553,6 +554,22 @@ TEST_CASE( ctx, fragment_info, 1, 0, "d", &mbr[0]); CHECK(rc == TILEDB_ERR); + // Get global order lower bound - should fail since it's a dense array + { + void* dimensions[] = {&mbr[0], &mbr[1]}; + rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx, fragment_info, 0, 0, nullptr, &dimensions[0]); + CHECK(rc == TILEDB_ERR); + } + + // Get global order upper bound - should fail since it's a dense array + { + void* dimensions[] = {&mbr[0], &mbr[1]}; + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx, fragment_info, 0, 0, nullptr, &dimensions[0]); + CHECK(rc == TILEDB_ERR); + } + // Get version uint32_t version; rc = tiledb_fragment_info_get_version(ctx, fragment_info, 0, &version); @@ -731,6 +748,7 @@ TEST_CASE("C API: Test MBR fragment info", "[capi][fragment_info][mbr]") { rc = tiledb_fragment_info_get_mbr_num(ctx, fragment_info, 0, &mbr_num); CHECK(rc == TILEDB_OK); CHECK(mbr_num == 1); + rc = tiledb_fragment_info_get_mbr_num(ctx, fragment_info, 1, &mbr_num); CHECK(rc == TILEDB_OK); CHECK(mbr_num == 2); @@ -753,6 +771,108 @@ TEST_CASE("C API: Test MBR fragment info", "[capi][fragment_info][mbr]") { CHECK(rc == TILEDB_OK); CHECK(mbr == std::vector{7, 8}); + // Get global order lower bounds + { + std::vector lower_bound(2); + void* dimensions[] = {&lower_bound[0], &lower_bound[1]}; + + // first fragment - one tile + rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx, fragment_info, 0, 0, nullptr, &dimensions[0]); + CHECK(error_if_any(ctx, rc) == std::nullopt); + if (rc == TILEDB_OK) { + CHECK(lower_bound == std::vector{1, 1}); + } + rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx, fragment_info, 0, 1, nullptr, &dimensions[0]); + CHECK(rc == 
TILEDB_ERR); + + // second fragment - two tiles + rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx, fragment_info, 1, 0, nullptr, &dimensions[0]); + CHECK(error_if_any(ctx, rc) == std::nullopt); + if (rc == TILEDB_OK) { + CHECK(lower_bound == std::vector{1, 1}); + } + rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx, fragment_info, 1, 1, nullptr, &dimensions[0]); + CHECK(error_if_any(ctx, rc) == std::nullopt); + if (rc == TILEDB_OK) { + CHECK(lower_bound == std::vector{7, 7}); + } + rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx, fragment_info, 1, 2, nullptr, &dimensions[0]); + CHECK(rc == TILEDB_ERR); + + // third fragment - two tiles + rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx, fragment_info, 2, 0, nullptr, &dimensions[0]); + CHECK(error_if_any(ctx, rc) == std::nullopt); + if (rc == TILEDB_OK) { + CHECK(lower_bound == std::vector{1, 1}); + } + rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx, fragment_info, 2, 1, nullptr, &dimensions[0]); + CHECK(error_if_any(ctx, rc) == std::nullopt); + if (rc == TILEDB_OK) { + CHECK(lower_bound == std::vector{1, 8}); + } + rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx, fragment_info, 2, 2, nullptr, &dimensions[0]); + CHECK(rc == TILEDB_ERR); + } + + // Get global order upper bounds + { + std::vector upper_bound(2); + void* dimensions[] = {&upper_bound[0], &upper_bound[1]}; + + // first fragment - one tile + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx, fragment_info, 0, 0, nullptr, &dimensions[0]); + CHECK(error_if_any(ctx, rc) == std::nullopt); + if (rc == TILEDB_OK) { + CHECK(upper_bound == std::vector{2, 2}); + } + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx, fragment_info, 0, 1, nullptr, &dimensions[0]); + CHECK(rc == TILEDB_ERR); + + // second fragment - two tiles + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx, fragment_info, 1, 0, nullptr, &dimensions[0]); + CHECK(error_if_any(ctx, rc) == std::nullopt); + if (rc == TILEDB_OK) { + CHECK(upper_bound == std::vector{2, 2}); + } + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx, fragment_info, 1, 1, nullptr, &dimensions[0]); + CHECK(error_if_any(ctx, rc) == std::nullopt); + if (rc == TILEDB_OK) { + CHECK(upper_bound == std::vector{8, 8}); + } + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx, fragment_info, 1, 2, nullptr, &dimensions[0]); + CHECK(rc == TILEDB_ERR); + + // third fragment - two tiles + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx, fragment_info, 2, 0, nullptr, &dimensions[0]); + CHECK(error_if_any(ctx, rc) == std::nullopt); + if (rc == TILEDB_OK) { + CHECK(upper_bound == std::vector{2, 2}); + } + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx, fragment_info, 2, 1, nullptr, &dimensions[0]); + CHECK(error_if_any(ctx, rc) == std::nullopt); + if (rc == TILEDB_OK) { + CHECK(upper_bound == std::vector{7, 7}); + } + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx, fragment_info, 2, 2, nullptr, &dimensions[0]); + CHECK(rc == TILEDB_ERR); + } + // Clean up tiledb_fragment_info_free(&fragment_info); remove_dir(array_name, ctx, vfs); diff --git a/tiledb/api/c_api/fragment_info/fragment_info_api.cc b/tiledb/api/c_api/fragment_info/fragment_info_api.cc index 43af7244567..3db174209e2 100644 --- a/tiledb/api/c_api/fragment_info/fragment_info_api.cc +++ b/tiledb/api/c_api/fragment_info/fragment_info_api.cc @@ -321,11 +321,11 @@ capi_return_t 
tiledb_fragment_info_get_global_order_lower_bound( uint32_t fragment_id, uint32_t mbr_id, size_t* dimension_sizes, - void** dimensions) TILEDB_NOEXCEPT { + void** dimensions) { ensure_fragment_info_is_valid(fragment_info); ensure_output_pointer_is_valid(dimensions); - throw_if_not_ok(fragment_info->get_global_order_upper_bound( + throw_if_not_ok(fragment_info->get_global_order_lower_bound( fragment_id, mbr_id, dimension_sizes, dimensions)); return TILEDB_OK; @@ -336,7 +336,7 @@ capi_return_t tiledb_fragment_info_get_global_order_upper_bound( uint32_t fragment_id, uint32_t mbr_id, size_t* dimension_sizes, - void** dimensions) TILEDB_NOEXCEPT { + void** dimensions) { ensure_fragment_info_is_valid(fragment_info); ensure_output_pointer_is_valid(dimensions); diff --git a/tiledb/common/memory_tracker.cc b/tiledb/common/memory_tracker.cc index 112ddc22ccb..bd80c122db2 100644 --- a/tiledb/common/memory_tracker.cc +++ b/tiledb/common/memory_tracker.cc @@ -97,6 +97,10 @@ std::string memory_type_to_str(MemoryType type) { return "TileMaxVals"; case MemoryType::TILE_MIN_VALS: return "TileMinVals"; + case MemoryType::TILE_GLOBAL_ORDER_MAX_VALS: + return "TileGlobalOrderMaxVals"; + case MemoryType::TILE_GLOBAL_ORDER_MIN_VALS: + return "TileGlobalOrderMinVals"; case MemoryType::TILE_NULL_COUNTS: return "TileNullCounts"; case MemoryType::TILE_OFFSETS: diff --git a/tiledb/common/memory_tracker.h b/tiledb/common/memory_tracker.h index e6f67238570..1ebd225264f 100644 --- a/tiledb/common/memory_tracker.h +++ b/tiledb/common/memory_tracker.h @@ -132,6 +132,8 @@ enum class MemoryType { TILE_HILBERT_VALUES, TILE_MAX_VALS, TILE_MIN_VALS, + TILE_GLOBAL_ORDER_MIN_VALS, + TILE_GLOBAL_ORDER_MAX_VALS, TILE_NULL_COUNTS, TILE_OFFSETS, TILE_SUMS, diff --git a/tiledb/sm/fragment/fragment_info.cc b/tiledb/sm/fragment/fragment_info.cc index 2d49a7bea49..16a89b22e2b 100644 --- a/tiledb/sm/fragment/fragment_info.cc +++ b/tiledb/sm/fragment/fragment_info.cc @@ -722,22 +722,117 @@ Status FragmentInfo::get_mbr_var( return get_mbr_var(fid, mid, did, start, end); } +static Status read_global_order_bound_to_user_buffers( + const ArraySchema& schema, + const auto& fixedPart, + const auto& varPart, + uint32_t which_tile, + size_t* dimension_sizes, + void** dimensions) { + const auto ds = schema.domain().dimensions(); + for (uint64_t d = 0; d < ds.size(); d++) { + if (ds[d]->var_size()) { + if (dimension_sizes == nullptr) { + throw FragmentInfoException( + "Cannot get MBR global order bound: Variable-length dimension " + "requires non-NULL dimension_sizes argument"); + } + + std::span offsets( + reinterpret_cast(fixedPart[d].data()), + fixedPart[d].size() / sizeof(uint64_t)); + if (which_tile >= offsets.size()) { + throw FragmentInfoException( + "Cannot get MBR global order bound: Invalid mbr index"); + } else if (which_tile + 1 == offsets.size()) { + dimension_sizes[d] = varPart[d].size() - offsets[which_tile]; + } else { + dimension_sizes[d] = offsets[which_tile + 1] - offsets[which_tile]; + } + + const void* coord = &varPart[d][offsets[which_tile]]; + memcpy(dimensions[d], coord, dimension_sizes[d]); + } else { + const uint64_t dimFixedSize = ds[d]->cell_size(); + if (dimFixedSize * which_tile >= fixedPart[d].size()) { + throw FragmentInfoException( + "Cannot get MBR global order bound: Invalid mbr index"); + } + + const void* coord = &fixedPart[d].data()[which_tile * dimFixedSize]; + memcpy(dimensions[d], coord, dimFixedSize); + } + } + + return Status::Ok(); +} + Status FragmentInfo::get_global_order_lower_bound( uint32_t fid, 
uint32_t mid, size_t* dimension_sizes, void** dimensions) { - (void)fid; - (void)mid; - (void)dimension_sizes; - (void)dimensions; - throw FragmentInfoException("TODO"); + ensure_loaded(); + if (fid >= fragment_num()) { + throw FragmentInfoException( + "Cannot get MBR global order bound: Invalid fragment index"); + } + + if (!single_fragment_info_vec_[fid].sparse()) { + throw FragmentInfoException("Cannot get MBR; Fragment is not sparse"); + } + + auto& meta = single_fragment_info_vec_[fid].meta(); + meta->loaded_metadata()->load_fragment_tile_global_order_bounds(enc_key_); + + const auto& fixedPart = + meta->loaded_metadata()->tile_global_order_min_buffer(); + if (fixedPart.empty()) { + throw FragmentInfoException( + "Cannot get MBR global order bound: Unavailable"); + } + + const auto& varPart = + meta->loaded_metadata()->tile_global_order_min_var_buffer(); + + return read_global_order_bound_to_user_buffers( + *meta->array_schema(), + fixedPart, + varPart, + mid, + dimension_sizes, + dimensions); } Status FragmentInfo::get_global_order_upper_bound( uint32_t fid, uint32_t mid, size_t* dimension_sizes, void** dimensions) { - (void)fid; - (void)mid; - (void)dimension_sizes; - (void)dimensions; - throw FragmentInfoException("TODO"); + ensure_loaded(); + if (fid >= fragment_num()) { + throw FragmentInfoException( + "Cannot get MBR global order bound: Invalid fragment index"); + } + + if (!single_fragment_info_vec_[fid].sparse()) { + throw FragmentInfoException("Cannot get MBR; Fragment is not sparse"); + } + + auto& meta = single_fragment_info_vec_[fid].meta(); + meta->loaded_metadata()->load_fragment_tile_global_order_bounds(enc_key_); + + const auto& fixedPart = + meta->loaded_metadata()->tile_global_order_max_buffer(); + if (fixedPart.empty()) { + throw FragmentInfoException( + "Cannot get MBR global order bound: Unavailable"); + } + + const auto& varPart = + meta->loaded_metadata()->tile_global_order_max_var_buffer(); + + return read_global_order_bound_to_user_buffers( + *meta->array_schema(), + fixedPart, + varPart, + mid, + dimension_sizes, + dimensions); } Status FragmentInfo::get_version(uint32_t fid, uint32_t* version) const { diff --git a/tiledb/sm/fragment/fragment_metadata.cc b/tiledb/sm/fragment/fragment_metadata.cc index 32081e98757..8eeba503e38 100644 --- a/tiledb/sm/fragment/fragment_metadata.cc +++ b/tiledb/sm/fragment/fragment_metadata.cc @@ -350,40 +350,39 @@ void FragmentMetadata::set_tile_global_order_bounds_fixed( const auto dim = array_schema_->domain().get_dimension_index(dim_name); - if (array_schema_->domain().dimensions()[dim]->var_size()) { - const uint64_t* source_offsets = data.offset_tile().data_as(); + const auto& tile_min = data.global_order_min(); + iassert(tile_min.has_value()); + + const auto& tile_max = data.global_order_max(); + iassert(tile_max.has_value()); + if (array_schema_->domain().dimensions()[dim]->var_size()) { // NB: for now we set a length, and it will be updated to an offset // via `convert_tile_global_order_bounds_sizes_to_offsets`, // and then the var data will be written after that + uint64_t* min_sizes = reinterpret_cast( loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data()); - uint64_t* max_sizes = reinterpret_cast( - loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data()); + min_sizes[tile] = tile_min.value().size(); - const uint64_t fixed_offset = tile / sizeof(uint64_t); - max_sizes[fixed_offset] = - data.var_tile().size() - source_offsets[data.cell_num() - 1]; - if (data.cell_num() == 1) { - 
min_sizes[fixed_offset] = max_sizes[fixed_offset]; - } else { - min_sizes[fixed_offset] = source_offsets[1] - source_offsets[0]; - } + uint64_t* max_sizes = reinterpret_cast( + loaded_metadata_ptr_->tile_global_order_max_buffer()[dim].data()); + max_sizes[tile] = tile_max.value().size(); } else { const uint64_t fixed_size = array_schema_->domain().dimensions()[dim]->cell_size(); - const uint8_t* fixed_data = data.fixed_tile().data_as(); + iassert(tile_min.value().size() == fixed_size); + iassert(tile_max.value().size() == fixed_size); - void* min_data = - loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data(); - memcpy(min_data, &fixed_data[0], fixed_size); + const uint64_t offset = fixed_size * tile; - const uint64_t max_start = - data.fixed_tile() - .data_as()[fixed_size * (data.cell_num() - 1)]; - void* max_data = + uint8_t* min_data = loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data(); - memcpy(max_data, &fixed_data[max_start], fixed_size); + memcpy(&min_data[offset], tile_min.value().data(), fixed_size); + + uint8_t* max_data = + loaded_metadata_ptr_->tile_global_order_max_buffer()[dim].data(); + memcpy(&max_data[offset], tile_max.value().data(), fixed_size); } } @@ -394,7 +393,11 @@ void FragmentMetadata::set_tile_global_order_bounds_var( return; } - iassert(data.cell_num() > 0); + const auto& tile_min = data.global_order_min(); + iassert(tile_min.has_value()); + + const auto& tile_max = data.global_order_max(); + iassert(tile_max.has_value()); const uint64_t* min_sizes = reinterpret_cast( loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data()); @@ -407,18 +410,21 @@ void FragmentMetadata::set_tile_global_order_bounds_var( const uint64_t max_var_start = data_offsets[data.cell_num() - 1]; const uint64_t max_var_size = max_sizes[data.cell_num() - 1]; + iassert(tile_min.value().size() == min_var_size); + iassert(tile_max.value().size() == max_var_size); + if (min_var_size) { memcpy( &loaded_metadata_ptr_ ->tile_global_order_min_var_buffer()[tile][min_var_start], - data.var_tile().data_as(), + tile_min.value().data(), min_var_size); } if (max_var_size) { memcpy( &loaded_metadata_ptr_ ->tile_global_order_max_var_buffer()[tile][max_var_start], - data.var_tile().data_as(), + tile_max.value().data(), max_var_size); } } @@ -1271,7 +1277,8 @@ void FragmentMetadata::store_v15_or_higher( offset += nbytes; } - if (version_ >= constants::fragment_metadata_global_order_bounds_version) { + if (!array_schema_->dense() && + version_ >= constants::fragment_metadata_global_order_bounds_version) { const auto num_dims = array_schema_->dim_num(); // Store global order mins gt_offsets_.tile_global_order_min_offsets_.resize(num_dims); @@ -1285,7 +1292,7 @@ void FragmentMetadata::store_v15_or_higher( gt_offsets_.tile_global_order_max_offsets_.resize(num_dims); for (unsigned i = 0; i < num_dims; ++i) { gt_offsets_.tile_global_order_max_offsets_[i] = offset; - store_tile_global_order_mins(i, encryption_key, &nbytes); + store_tile_global_order_maxs(i, encryption_key, &nbytes); offset += nbytes; } } @@ -1362,6 +1369,15 @@ void FragmentMetadata::set_num_tiles(uint64_t num_tiles) { if (array_schema_->is_nullable(it.first)) loaded_metadata_ptr_->tile_null_counts()[i].resize(num_tiles, 0); } + + // Sparse arrays also store the global order lower/upper bounds + if (!array_schema_->dense() && is_dim) { + const unsigned dimension = i - array_schema_->dim_num(); + loaded_metadata_ptr_->tile_global_order_min_buffer()[dimension].resize( + num_tiles * cell_size, 0); + 
loaded_metadata_ptr_->tile_global_order_max_buffer()[dimension].resize( + num_tiles * cell_size, 0); + } } if (!dense_) { @@ -2516,6 +2532,19 @@ void FragmentMetadata::load_generic_tile_offsets_v16_or_higher( gt_offsets_.tile_max_offsets_.resize(num); deserializer.read(>_offsets_.tile_max_offsets_[0], num * sizeof(uint64_t)); + if (version_ >= constants::fragment_metadata_global_order_bounds_version) { + // Load offsets for the tile global order bounds + const auto num_dims = array_schema_->dim_num(); + gt_offsets_.tile_global_order_min_offsets_.resize(num_dims); + gt_offsets_.tile_global_order_max_offsets_.resize(num_dims); + deserializer.read( + gt_offsets_.tile_global_order_min_offsets_.data(), + num_dims * sizeof(uint64_t)); + deserializer.read( + gt_offsets_.tile_global_order_max_offsets_.data(), + num_dims * sizeof(uint64_t)); + } + // Load offsets for tile sum offsets gt_offsets_.tile_sum_offsets_.resize(num); deserializer.read(>_offsets_.tile_sum_offsets_[0], num * sizeof(uint64_t)); @@ -2757,6 +2786,17 @@ void FragmentMetadata::write_generic_tile_offsets( serializer.write(>_offsets_.tile_max_offsets_[0], num * sizeof(uint64_t)); } + if (version_ >= constants::fragment_metadata_global_order_bounds_version) { + // Write the tile global order bound offsets + const auto num_dims = array_schema_->dim_num(); + serializer.write( + gt_offsets_.tile_global_order_min_offsets_.data(), + num_dims * sizeof(uint64_t)); + serializer.write( + gt_offsets_.tile_global_order_max_offsets_.data(), + num_dims * sizeof(uint64_t)); + } + // Write tile sum offsets if (version_ >= 11) { serializer.write(>_offsets_.tile_sum_offsets_[0], num * sizeof(uint64_t)); diff --git a/tiledb/sm/fragment/loaded_fragment_metadata.cc b/tiledb/sm/fragment/loaded_fragment_metadata.cc index fbeb356f340..ddc29ed2fd6 100644 --- a/tiledb/sm/fragment/loaded_fragment_metadata.cc +++ b/tiledb/sm/fragment/loaded_fragment_metadata.cc @@ -67,6 +67,14 @@ LoadedFragmentMetadata::LoadedFragmentMetadata( , tile_max_buffer_(memory_tracker_->get_resource(MemoryType::TILE_MAX_VALS)) , tile_max_var_buffer_( memory_tracker_->get_resource(MemoryType::TILE_MAX_VALS)) + , tile_global_order_min_buffer_( + memory_tracker_->get_resource(MemoryType::TILE_GLOBAL_ORDER_MIN_VALS)) + , tile_global_order_max_buffer_( + memory_tracker_->get_resource(MemoryType::TILE_GLOBAL_ORDER_MAX_VALS)) + , tile_global_order_min_var_buffer_( + memory_tracker_->get_resource(MemoryType::TILE_GLOBAL_ORDER_MIN_VALS)) + , tile_global_order_max_var_buffer_( + memory_tracker_->get_resource(MemoryType::TILE_GLOBAL_ORDER_MAX_VALS)) , tile_sums_(memory_tracker_->get_resource(MemoryType::TILE_SUMS)) , tile_null_counts_( memory_tracker_->get_resource(MemoryType::TILE_NULL_COUNTS)) { @@ -221,6 +229,10 @@ void LoadedFragmentMetadata::resize_offsets(uint64_t size) { tile_min_var_buffer().resize(size); tile_max_buffer().resize(size); tile_max_var_buffer().resize(size); + tile_global_order_min_buffer().resize(size); + tile_global_order_min_var_buffer().resize(size); + tile_global_order_max_buffer().resize(size); + tile_global_order_max_var_buffer().resize(size); tile_sums().resize(size); tile_null_counts().resize(size); fragment_mins_.resize(size); @@ -233,6 +245,8 @@ void LoadedFragmentMetadata::resize_offsets(uint64_t size) { loaded_metadata_.tile_validity_offsets_.resize(size, false); loaded_metadata_.tile_min_.resize(size, false); loaded_metadata_.tile_max_.resize(size, false); + loaded_metadata_.tile_global_order_min_.resize(size, false); + 
loaded_metadata_.tile_global_order_max_.resize(size, false); loaded_metadata_.tile_sum_.resize(size, false); loaded_metadata_.tile_null_count_.resize(size, false); } diff --git a/tiledb/sm/fragment/loaded_fragment_metadata.h b/tiledb/sm/fragment/loaded_fragment_metadata.h index 0707441fb73..2580342c131 100644 --- a/tiledb/sm/fragment/loaded_fragment_metadata.h +++ b/tiledb/sm/fragment/loaded_fragment_metadata.h @@ -60,6 +60,8 @@ class LoadedFragmentMetadata { std::vector tile_validity_offsets_; std::vector tile_min_; std::vector tile_max_; + std::vector tile_global_order_min_; + std::vector tile_global_order_max_; std::vector tile_sum_; std::vector tile_null_count_; bool fragment_min_max_sum_null_count_ = false; @@ -332,6 +334,14 @@ class LoadedFragmentMetadata { virtual void load_fragment_min_max_sum_null_count( const EncryptionKey& encryption_key) = 0; + /** + * Loads the tile global order bounds for the fragment. + * + * @param encrpytion_key The key the array was opened with. + */ + virtual void load_fragment_tile_global_order_bounds( + const EncryptionKey& encryption_key) = 0; + /** * Loads the processed conditions for the fragment. The processed conditions * is the list of delete/update conditions that have already been applied for @@ -831,6 +841,24 @@ class LoadedFragmentMetadata { virtual void load_tile_max_values( const EncryptionKey& encryption_key, unsigned idx) = 0; + /** + * Loads the global order minimum values for the given dimension from storage. + * + * @param encryption_key The encryption key + * @param dimension Dimension index + */ + virtual void load_tile_global_order_min_values( + const EncryptionKey& encryption_key, unsigned dimension) = 0; + + /** + * Loads the global order minimum values for the given dimension from storage. + * + * @param encryption_key The encryption key + * @param dimension Dimension index + */ + virtual void load_tile_global_order_max_values( + const EncryptionKey& encryption_key, unsigned dimension) = 0; + /** * Loads the sum values for the input attribute idx from storage. * diff --git a/tiledb/sm/fragment/ondemand_fragment_metadata.cc b/tiledb/sm/fragment/ondemand_fragment_metadata.cc index 45f070ad4dd..d07dccb5a33 100644 --- a/tiledb/sm/fragment/ondemand_fragment_metadata.cc +++ b/tiledb/sm/fragment/ondemand_fragment_metadata.cc @@ -113,6 +113,19 @@ void OndemandFragmentMetadata::load_fragment_min_max_sum_null_count( loaded_metadata_.fragment_min_max_sum_null_count_ = true; } +void OndemandFragmentMetadata::load_fragment_tile_global_order_bounds( + const EncryptionKey& encryption_key) { + if (parent_fragment_.version_ < + constants::fragment_metadata_global_order_bounds_version) { + return; + } + + for (unsigned d = 0; d < parent_fragment_.array_schema_->dim_num(); d++) { + load_tile_global_order_min_values(encryption_key, d); + load_tile_global_order_max_values(encryption_key, d); + } +} + void OndemandFragmentMetadata::load_processed_conditions( const EncryptionKey& encryption_key) { if (loaded_metadata_.processed_conditions_) { @@ -307,18 +320,12 @@ void OndemandFragmentMetadata::load_tile_validity_offsets( } } -// ===== FORMAT ===== -// tile_min_values#0_size_buffer (uint64_t) -// tile_min_values#0_size_buffer_var (uint64_t) -// tile_min_values#0_buffer -// tile_min_values#0_buffer_var -// ... 
-// tile_min_values#_size_buffer (uint64_t) -// tile_min_values#_size_buffer_var (uint64_t) -// tile_min_values#_buffer -// tile_min_values#_buffer_var -void OndemandFragmentMetadata::load_tile_min_values( - unsigned idx, Deserializer& deserializer) { +static void load_tile_bound_values( + auto& fixed_buffer, + auto& var_buffer, + MemoryTracker* memory_tracker, + MemoryType mem_type, + Deserializer& deserializer) { uint64_t buffer_size = 0; uint64_t var_buffer_size = 0; @@ -331,26 +338,47 @@ void OndemandFragmentMetadata::load_tile_min_values( // Get tile mins if (buffer_size != 0) { auto size = buffer_size + var_buffer_size; - if (memory_tracker_ != nullptr && - !memory_tracker_->take_memory(size, MemoryType::TILE_MIN_VALS)) { + if (memory_tracker != nullptr && + !memory_tracker->take_memory(size, mem_type)) { throw FragmentMetadataStatusException( - "Cannot load min values; Insufficient memory budget; Needed " + - std::to_string(size) + " but only had " + - std::to_string(memory_tracker_->get_memory_available()) + + "Cannot load " + memory_type_to_str(mem_type) + + "; Insufficient memory budget; Needed " + std::to_string(size) + + " but only had " + + std::to_string(memory_tracker->get_memory_available()) + " from budget " + - std::to_string(memory_tracker_->get_memory_budget())); + std::to_string(memory_tracker->get_memory_budget())); } - tile_min_buffer_[idx].resize(buffer_size); - deserializer.read(&tile_min_buffer_[idx][0], buffer_size); + fixed_buffer.resize(buffer_size); + deserializer.read(&fixed_buffer[0], buffer_size); if (var_buffer_size) { - tile_min_var_buffer_[idx].resize(var_buffer_size); - deserializer.read(&tile_min_var_buffer_[idx][0], var_buffer_size); + var_buffer.resize(var_buffer_size); + deserializer.read(&var_buffer[0], var_buffer_size); } } } +// ===== FORMAT ===== +// tile_min_values#0_size_buffer (uint64_t) +// tile_min_values#0_size_buffer_var (uint64_t) +// tile_min_values#0_buffer +// tile_min_values#0_buffer_var +// ... 
+// tile_min_values#_size_buffer (uint64_t) +// tile_min_values#_size_buffer_var (uint64_t) +// tile_min_values#_buffer +// tile_min_values#_buffer_var +void OndemandFragmentMetadata::load_tile_min_values( + unsigned idx, Deserializer& deserializer) { + load_tile_bound_values( + tile_min_buffer_[idx], + tile_min_var_buffer_[idx], + memory_tracker_.get(), + MemoryType::TILE_MIN_VALS, + deserializer); +} + // ===== FORMAT ===== // tile_max_values#0_size_buffer (uint64_t) // tile_max_values#0_size_buffer_var (uint64_t) @@ -363,36 +391,52 @@ void OndemandFragmentMetadata::load_tile_min_values( // tile_max_values#_buffer_var void OndemandFragmentMetadata::load_tile_max_values( unsigned idx, Deserializer& deserializer) { - uint64_t buffer_size = 0; - uint64_t var_buffer_size = 0; - - // Get buffer size - buffer_size = deserializer.read(); - - // Get var buffer size - var_buffer_size = deserializer.read(); - - // Get tile maxs - if (buffer_size != 0) { - auto size = buffer_size + var_buffer_size; - if (memory_tracker_ != nullptr && - !memory_tracker_->take_memory(size, MemoryType::TILE_MAX_VALS)) { - throw FragmentMetadataStatusException( - "Cannot load max values; Insufficient memory budget; Needed " + - std::to_string(size) + " but only had " + - std::to_string(memory_tracker_->get_memory_available()) + - " from budget " + - std::to_string(memory_tracker_->get_memory_budget())); - } + load_tile_bound_values( + tile_max_buffer_[idx], + tile_max_var_buffer_[idx], + memory_tracker_.get(), + MemoryType::TILE_MAX_VALS, + deserializer); +} - tile_max_buffer_[idx].resize(buffer_size); - deserializer.read(&tile_max_buffer_[idx][0], buffer_size); +// ===== FORMAT ===== +// tile_global_order_min_values#0_size_buffer (uint64_t) +// tile_global_order_min_values#0_size_buffer_var (uint64_t) +// tile_global_order_min_values#0_buffer +// tile_global_order_min_values#0_buffer_var +// ... +// tile_global_order_min_values#_size_buffer (uint64_t) +// tile_global_order_min_values#_size_buffer_var (uint64_t) +// tile_global_order_min_values#_buffer +// tile_global_order_min_values#_buffer_var +void OndemandFragmentMetadata::load_tile_global_order_min_values( + unsigned dimension, Deserializer& deserializer) { + load_tile_bound_values( + tile_global_order_min_buffer_[dimension], + tile_global_order_min_var_buffer_[dimension], + memory_tracker_.get(), + MemoryType::TILE_GLOBAL_ORDER_MIN_VALS, + deserializer); +} - if (var_buffer_size) { - tile_max_var_buffer_[idx].resize(var_buffer_size); - deserializer.read(&tile_max_var_buffer_[idx][0], var_buffer_size); - } - } +// ===== FORMAT ===== +// tile_global_order_max_values#0_size_buffer (uint64_t) +// tile_global_order_max_values#0_size_buffer_var (uint64_t) +// tile_global_order_max_values#0_buffer +// tile_global_order_max_values#0_buffer_var +// ... 
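// Illustrative sketch (not the library's API): the loaders above differ only in
// which buffers they fill and which MemoryType they charge against the memory
// tracker, so the budget check they share reduces to a "take or throw" pattern.
// BudgetTracker and charge_or_throw below are hypothetical stand-ins:

#include <cstdint>
#include <stdexcept>
#include <string>

struct BudgetTracker {  // stand-in for the memory tracker used above
  uint64_t available;
  bool take_memory(uint64_t n) {
    if (n > available) {
      return false;
    }
    available -= n;
    return true;
  }
};

inline void charge_or_throw(
    BudgetTracker* tracker, uint64_t size, const std::string& what) {
  if (tracker != nullptr && !tracker->take_memory(size)) {
    throw std::runtime_error(
        "Cannot load " + what + "; Insufficient memory budget; Needed " +
        std::to_string(size) + " but only had " +
        std::to_string(tracker->available));
  }
}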
+// tile_global_order_max_values#_size_buffer (uint64_t) +// tile_global_order_max_values#_size_buffer_var (uint64_t) +// tile_global_order_max_values#_buffer +// tile_global_order_max_values#_buffer_var +void OndemandFragmentMetadata::load_tile_global_order_max_values( + unsigned dimension, Deserializer& deserializer) { + load_tile_bound_values( + tile_global_order_max_buffer_[dimension], + tile_global_order_max_var_buffer_[dimension], + memory_tracker_.get(), + MemoryType::TILE_GLOBAL_ORDER_MAX_VALS, + deserializer); } // ===== FORMAT ===== @@ -577,6 +621,56 @@ void OndemandFragmentMetadata::load_tile_max_values( loaded_metadata_.tile_max_[idx] = true; } +void OndemandFragmentMetadata::load_tile_global_order_min_values( + const EncryptionKey& encryption_key, unsigned dimension) { + if (parent_fragment_.version_ < + constants::fragment_metadata_global_order_bounds_version) { + return; + } + + std::lock_guard lock(parent_fragment_.mtx_); + + if (loaded_metadata_.tile_global_order_min_[dimension]) { + return; + } + + auto tile = parent_fragment_.read_generic_tile_from_file( + encryption_key, + parent_fragment_.gt_offsets_.tile_global_order_min_offsets_[dimension]); + parent_fragment_.resources_->stats().add_counter( + "read_tile_global_order_min_size", tile->size()); + + Deserializer deserializer(tile->data(), tile->size()); + load_tile_global_order_min_values(dimension, deserializer); + + loaded_metadata_.tile_global_order_min_[dimension] = true; +} + +void OndemandFragmentMetadata::load_tile_global_order_max_values( + const EncryptionKey& encryption_key, unsigned dimension) { + if (parent_fragment_.version_ < + constants::fragment_metadata_global_order_bounds_version) { + return; + } + + std::lock_guard lock(parent_fragment_.mtx_); + + if (loaded_metadata_.tile_global_order_max_[dimension]) { + return; + } + + auto tile = parent_fragment_.read_generic_tile_from_file( + encryption_key, + parent_fragment_.gt_offsets_.tile_global_order_max_offsets_[dimension]); + parent_fragment_.resources_->stats().add_counter( + "read_tile_global_order_max_size", tile->size()); + + Deserializer deserializer(tile->data(), tile->size()); + load_tile_global_order_max_values(dimension, deserializer); + + loaded_metadata_.tile_global_order_max_[dimension] = true; +} + void OndemandFragmentMetadata::load_tile_sum_values( const EncryptionKey& encryption_key, unsigned idx) { if (parent_fragment_.version_ < constants::tile_metadata_min_version) { diff --git a/tiledb/sm/fragment/ondemand_fragment_metadata.h b/tiledb/sm/fragment/ondemand_fragment_metadata.h index 651741b7be3..e8573c88a0c 100644 --- a/tiledb/sm/fragment/ondemand_fragment_metadata.h +++ b/tiledb/sm/fragment/ondemand_fragment_metadata.h @@ -75,6 +75,14 @@ class OndemandFragmentMetadata : public LoadedFragmentMetadata { virtual void load_fragment_min_max_sum_null_count( const EncryptionKey& encryption_key) override; + /** + * Loads the tile global order bounds for the fragment. + * + * @param encrpytion_key The key the array was opened with. + */ + virtual void load_fragment_tile_global_order_bounds( + const EncryptionKey& encryption_key) override; + /** * Loads the processed conditions for the fragment. 
The processed conditions * is the list of delete/update conditions that have already been applied for @@ -223,6 +231,32 @@ class OndemandFragmentMetadata : public LoadedFragmentMetadata { virtual void load_tile_max_values( const EncryptionKey& encryption_key, unsigned idx) override; + /** + * Loads the tile global order minima for the dimension `dimension`. + */ + void load_tile_global_order_min_values( + unsigned dimension, Deserializer& deserializer); + + /** + * Loads the tile global order minima for the dimension `dimension` from + * storage. + */ + void load_tile_global_order_min_values( + const EncryptionKey& encryption_key, unsigned dimension) override; + + /** + * Loads the tile global order maxima for the dimension `dimension`. + */ + void load_tile_global_order_max_values( + unsigned dimension, Deserializer& deserializer); + + /** + * Loads the tile global order maxima for the dimension `dimension` from + * storage. + */ + void load_tile_global_order_max_values( + const EncryptionKey& encryption_key, unsigned dimension) override; + /** * Loads the sum values for the input attribute from the input buffer. * diff --git a/tiledb/sm/fragment/v1v2preloaded_fragment_metadata.cc b/tiledb/sm/fragment/v1v2preloaded_fragment_metadata.cc index 5156316161c..eb6b60e5833 100644 --- a/tiledb/sm/fragment/v1v2preloaded_fragment_metadata.cc +++ b/tiledb/sm/fragment/v1v2preloaded_fragment_metadata.cc @@ -165,6 +165,12 @@ void V1V2PreloadedFragmentMetadata::load_fragment_min_max_sum_null_count( return; } +void V1V2PreloadedFragmentMetadata::load_fragment_tile_global_order_bounds( + const EncryptionKey&) { + // N/A for v1_v2 preloaded meta + return; +} + /* ********************************* */ /* PRIVATE METHODS */ /* ********************************* */ @@ -205,6 +211,18 @@ void V1V2PreloadedFragmentMetadata::load_tile_max_values( return; } +void V1V2PreloadedFragmentMetadata::load_tile_global_order_min_values( + const EncryptionKey&, unsigned) { + // N/A for v1_v2 preloaded meta + return; +} + +void V1V2PreloadedFragmentMetadata::load_tile_global_order_max_values( + const EncryptionKey&, unsigned) { + // N/A for v1_v2 preloaded meta + return; +} + void V1V2PreloadedFragmentMetadata::load_tile_sum_values( const EncryptionKey&, unsigned) { // N/A for v1_v2 preloaded meta diff --git a/tiledb/sm/fragment/v1v2preloaded_fragment_metadata.h b/tiledb/sm/fragment/v1v2preloaded_fragment_metadata.h index 7c1e8e5e6bf..437a6da06b3 100644 --- a/tiledb/sm/fragment/v1v2preloaded_fragment_metadata.h +++ b/tiledb/sm/fragment/v1v2preloaded_fragment_metadata.h @@ -105,6 +105,14 @@ class V1V2PreloadedFragmentMetadata : public LoadedFragmentMetadata { virtual void load_fragment_min_max_sum_null_count( const EncryptionKey& encryption_key) override; + /** + * Loads the tile global order bounds for the fragment. + * + * @param encrpytion_key The key the array was opened with. + */ + virtual void load_fragment_tile_global_order_bounds( + const EncryptionKey& encryption_key) override; + /** * Loads the processed conditions for the fragment. The processed conditions * is the list of delete/update conditions that have already been applied for @@ -201,6 +209,30 @@ class V1V2PreloadedFragmentMetadata : public LoadedFragmentMetadata { virtual void load_tile_max_values( const EncryptionKey& encryption_key, unsigned idx) override; + /** + * Loads the global order minimum values for the given dimension from storage. + * + * This is a no-op because this field does not exist in format versions + * using this class. 
+ * + * @param encryption_key The encryption key + * @param dimension Dimension index + */ + virtual void load_tile_global_order_min_values( + const EncryptionKey& encryption_key, unsigned dimension) override; + + /** + * Loads the global order maximum values for the given dimension from storage. + * + * This is a no-op because this field does not exist in format versions + * using this class. + * + * @param encryption_key The encryption key + * @param dimension Dimension index + */ + virtual void load_tile_global_order_max_values( + const EncryptionKey& encryption_key, unsigned dimension) override; + /** * Loads the sum values for the input attribute idx from storage. * diff --git a/tiledb/sm/query/writers/writer_base.cc b/tiledb/sm/query/writers/writer_base.cc index 8857f996250..2b152b0e237 100644 --- a/tiledb/sm/query/writers/writer_base.cc +++ b/tiledb/sm/query/writers/writer_base.cc @@ -1060,7 +1060,9 @@ Status WriterBase::write_tiles( } if (array_schema_.is_dim(attr)) { - frag_meta->convert_tile_global_order_bounds_sizes_to_offsets(attr); + if (array_schema_.var_size(attr)) { + frag_meta->convert_tile_global_order_bounds_sizes_to_offsets(attr); + } for (uint64_t idx = start_tile_idx; idx < end_tile_idx; idx++) { frag_meta->set_tile_global_order_bounds_var( attr, idx - start_tile_idx, tiles[idx]); diff --git a/tiledb/sm/serialization/fragment_metadata.cc b/tiledb/sm/serialization/fragment_metadata.cc index bd4ebb3e372..f2f91834881 100644 --- a/tiledb/sm/serialization/fragment_metadata.cc +++ b/tiledb/sm/serialization/fragment_metadata.cc @@ -100,6 +100,24 @@ void generic_tile_offsets_from_capnp( gt_offsets.tile_max_offsets_.emplace_back(tile_max_offset); } } + if (gt_reader.hasTileGlobalOrderMinOffsets()) { + auto tile_global_order_min_offsets = + gt_reader.getTileGlobalOrderMinOffsets(); + gt_offsets.tile_global_order_min_offsets_.reserve( + tile_global_order_min_offsets.size()); + for (const auto& offset : tile_global_order_min_offsets) { + gt_offsets.tile_global_order_min_offsets_.emplace_back(offset); + } + } + if (gt_reader.hasTileGlobalOrderMaxOffsets()) { + auto tile_global_order_max_offsets = + gt_reader.getTileGlobalOrderMaxOffsets(); + gt_offsets.tile_global_order_max_offsets_.reserve( + tile_global_order_max_offsets.size()); + for (const auto& offset : tile_global_order_max_offsets) { + gt_offsets.tile_global_order_max_offsets_.emplace_back(offset); + } + } if (gt_reader.hasTileSumOffsets()) { auto tile_sum_offsets = gt_reader.getTileSumOffsets(); gt_offsets.tile_sum_offsets_.reserve(tile_sum_offsets.size()); @@ -120,6 +138,16 @@ void generic_tile_offsets_from_capnp( gt_reader.getProcessedConditionsOffsets(); } +static void tile_bounds_from_capnp(const auto& reader, auto& buffers) { + for (const auto& t : reader) { + auto& last = buffers.emplace_back(); + last.reserve(t.size()); + for (const auto& v : t) { + last.emplace_back(v); + } + } +} + Status fragment_metadata_from_capnp( const shared_ptr& fragment_array_schema, const capnp::FragmentMetadata::Reader& frag_meta_reader, @@ -258,49 +286,47 @@ Status fragment_metadata_from_capnp( } if (frag_meta_reader.hasTileMinBuffer()) { auto tileminbuffer_reader = frag_meta_reader.getTileMinBuffer(); - for (const auto& t : tileminbuffer_reader) { - auto& last = - frag_meta->loaded_metadata()->tile_min_buffer().emplace_back(); - last.reserve(t.size()); - for (const auto& v : t) { - last.emplace_back(v); - } - } + tile_bounds_from_capnp( + tileminbuffer_reader, frag_meta->loaded_metadata()->tile_min_buffer()); 
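// Illustrative sketch (not part of the patch): tile_bounds_from_capnp above is a
// plain nested copy from a capnp List(List(UInt8)) reader into the loaded-metadata
// byte buffers; the same shape expressed over ordinary containers:

#include <cstdint>
#include <vector>

template <class NestedReader, class BufferList>
void copy_nested_bytes(const NestedReader& reader, BufferList& buffers) {
  for (const auto& inner : reader) {     // one entry per dimension or attribute
    auto& dst = buffers.emplace_back();  // destination byte vector
    dst.reserve(inner.size());
    for (const auto& byte : inner) {
      dst.emplace_back(byte);
    }
  }
}

// Usage over plain vectors:
//   std::vector<std::vector<uint8_t>> src{{1, 2}, {3}}, dst;
//   copy_nested_bytes(src, dst);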
loaded_metadata.tile_min_.resize(tileminbuffer_reader.size(), false); } if (frag_meta_reader.hasTileMinVarBuffer()) { - auto tileminvarbuffer_reader = frag_meta_reader.getTileMinVarBuffer(); - for (const auto& t : tileminvarbuffer_reader) { - auto& last = - frag_meta->loaded_metadata()->tile_min_var_buffer().emplace_back(); - last.reserve(t.size()); - for (const auto& v : t) { - last.emplace_back(v); - } - } + tile_bounds_from_capnp( + frag_meta_reader.getTileMinVarBuffer(), + frag_meta->loaded_metadata()->tile_min_var_buffer()); } if (frag_meta_reader.hasTileMaxBuffer()) { auto tilemaxbuffer_reader = frag_meta_reader.getTileMaxBuffer(); - for (const auto& t : tilemaxbuffer_reader) { - auto& last = - frag_meta->loaded_metadata()->tile_max_buffer().emplace_back(); - last.reserve(t.size()); - for (const auto& v : t) { - last.emplace_back(v); - } - } + tile_bounds_from_capnp( + tilemaxbuffer_reader, frag_meta->loaded_metadata()->tile_max_buffer()); loaded_metadata.tile_max_.resize(tilemaxbuffer_reader.size(), false); } if (frag_meta_reader.hasTileMaxVarBuffer()) { - auto tilemaxvarbuffer_reader = frag_meta_reader.getTileMaxVarBuffer(); - for (const auto& t : tilemaxvarbuffer_reader) { - auto& last = - frag_meta->loaded_metadata()->tile_max_var_buffer().emplace_back(); - last.reserve(t.size()); - for (const auto& v : t) { - last.emplace_back(v); - } - } + tile_bounds_from_capnp( + frag_meta_reader.getTileMaxVarBuffer(), + frag_meta->loaded_metadata()->tile_max_var_buffer()); + } + if (frag_meta_reader.hasTileGlobalOrderMinBuffer()) { + auto reader = frag_meta_reader.getTileGlobalOrderMinBuffer(); + tile_bounds_from_capnp( + reader, frag_meta->loaded_metadata()->tile_global_order_min_buffer()); + loaded_metadata.tile_global_order_min_.resize(reader.size(), false); + } + if (frag_meta_reader.hasTileGlobalOrderMinVarBuffer()) { + tile_bounds_from_capnp( + frag_meta_reader.getTileGlobalOrderMinVarBuffer(), + frag_meta->loaded_metadata()->tile_global_order_min_var_buffer()); + } + if (frag_meta_reader.hasTileGlobalOrderMaxBuffer()) { + auto reader = frag_meta_reader.getTileGlobalOrderMaxBuffer(); + tile_bounds_from_capnp( + reader, frag_meta->loaded_metadata()->tile_global_order_max_buffer()); + loaded_metadata.tile_global_order_max_.resize(reader.size(), false); + } + if (frag_meta_reader.hasTileGlobalOrderMaxVarBuffer()) { + tile_bounds_from_capnp( + frag_meta_reader.getTileGlobalOrderMaxVarBuffer(), + frag_meta->loaded_metadata()->tile_global_order_max_var_buffer()); } if (frag_meta_reader.hasTileSums()) { auto tilesums_reader = frag_meta_reader.getTileSums(); @@ -467,6 +493,24 @@ void generic_tile_offsets_to_capnp( builder.set(i, gt_tile_max_offsets[i]); } } + auto& gt_tile_global_order_min_offsets = + gt_offsets.tile_global_order_min_offsets_; + if (!gt_tile_global_order_min_offsets.empty()) { + auto builder = gt_offsets_builder.initTileGlobalOrderMinOffsets( + gt_tile_min_offsets.size()); + for (uint64_t i = 0; i < gt_tile_global_order_min_offsets.size(); ++i) { + builder.set(i, gt_tile_global_order_min_offsets[i]); + } + } + auto& gt_tile_global_order_max_offsets = + gt_offsets.tile_global_order_max_offsets_; + if (!gt_tile_global_order_max_offsets.empty()) { + auto builder = gt_offsets_builder.initTileGlobalOrderMaxOffsets( + gt_tile_max_offsets.size()); + for (uint64_t i = 0; i < gt_tile_global_order_max_offsets.size(); ++i) { + builder.set(i, gt_tile_global_order_max_offsets[i]); + } + } auto& gt_tile_sum_offsets = gt_offsets.tile_sum_offsets_; if (!gt_tile_sum_offsets.empty()) { auto 
builder = @@ -537,6 +581,32 @@ void fragment_meta_sizes_offsets_to_capnp( } } +static void tile_bounds_to_capnp( + const auto& fixedBuffers, + const auto& varBuffers, + auto initFixedBuilder, + auto initVarBuilder) { + if (!fixedBuffers.empty()) { + auto builder = initFixedBuilder(fixedBuffers.size()); + for (uint64_t i = 0; i < fixedBuffers.size(); ++i) { + builder.init(i, fixedBuffers[i].size()); + for (uint64_t j = 0; j < fixedBuffers[i].size(); ++j) { + builder[i].set(j, fixedBuffers[i][j]); + } + } + } + + if (!varBuffers.empty()) { + auto builder = initVarBuilder(varBuffers.size()); + for (uint64_t i = 0; i < varBuffers.size(); ++i) { + builder.init(i, varBuffers[i].size()); + for (uint64_t j = 0; j < varBuffers[i].size(); ++j) { + builder[i].set(j, varBuffers[i][j]); + } + } + } +} + Status fragment_metadata_to_capnp( const FragmentMetadata& frag_meta, capnp::FragmentMetadata::Builder* frag_meta_builder) { @@ -573,50 +643,46 @@ Status fragment_metadata_to_capnp( } } - auto& tile_min_buffer = frag_meta.loaded_metadata()->tile_min_buffer(); - if (!tile_min_buffer.empty()) { - auto builder = frag_meta_builder->initTileMinBuffer(tile_min_buffer.size()); - for (uint64_t i = 0; i < tile_min_buffer.size(); ++i) { - builder.init(i, tile_min_buffer[i].size()); - for (uint64_t j = 0; j < tile_min_buffer[i].size(); ++j) { - builder[i].set(j, tile_min_buffer[i][j]); - } - } - } - auto& tile_min_var_buffer = - frag_meta.loaded_metadata()->tile_min_var_buffer(); - if (!tile_min_var_buffer.empty()) { - auto builder = - frag_meta_builder->initTileMinVarBuffer(tile_min_var_buffer.size()); - for (uint64_t i = 0; i < tile_min_var_buffer.size(); ++i) { - builder.init(i, tile_min_var_buffer[i].size()); - for (uint64_t j = 0; j < tile_min_var_buffer[i].size(); ++j) { - builder[i].set(j, tile_min_var_buffer[i][j]); - } - } - } - auto& tile_max_buffer = frag_meta.loaded_metadata()->tile_max_buffer(); - if (!tile_max_buffer.empty()) { - auto builder = frag_meta_builder->initTileMaxBuffer(tile_max_buffer.size()); - for (uint64_t i = 0; i < tile_max_buffer.size(); ++i) { - builder.init(i, tile_max_buffer[i].size()); - for (uint64_t j = 0; j < tile_max_buffer[i].size(); ++j) { - builder[i].set(j, tile_max_buffer[i][j]); - } - } - } - auto& tile_max_var_buffer = - frag_meta.loaded_metadata()->tile_max_var_buffer(); - if (!tile_max_var_buffer.empty()) { - auto builder = - frag_meta_builder->initTileMaxVarBuffer(tile_max_var_buffer.size()); - for (uint64_t i = 0; i < tile_max_var_buffer.size(); ++i) { - builder.init(i, tile_max_var_buffer[i].size()); - for (uint64_t j = 0; j < tile_max_var_buffer[i].size(); ++j) { - builder[i].set(j, tile_max_var_buffer[i][j]); - } - } - } + const auto& lm = *frag_meta.loaded_metadata(); + + tile_bounds_to_capnp( + lm.tile_min_buffer(), + lm.tile_min_var_buffer(), + [&frag_meta_builder](auto size) { + return frag_meta_builder->initTileMinBuffer(size); + }, + [&frag_meta_builder](auto size) { + return frag_meta_builder->initTileMinVarBuffer(size); + }); + tile_bounds_to_capnp( + lm.tile_max_buffer(), + lm.tile_max_var_buffer(), + [&frag_meta_builder](auto size) { + return frag_meta_builder->initTileMaxBuffer(size); + }, + [&frag_meta_builder](auto size) { + return frag_meta_builder->initTileMaxVarBuffer(size); + }); + + tile_bounds_to_capnp( + lm.tile_global_order_min_buffer(), + lm.tile_global_order_min_var_buffer(), + [&frag_meta_builder](auto size) { + return frag_meta_builder->initTileGlobalOrderMinBuffer(size); + }, + [&frag_meta_builder](auto size) { + return 
frag_meta_builder->initTileGlobalOrderMinVarBuffer(size); + }); + tile_bounds_to_capnp( + lm.tile_global_order_max_buffer(), + lm.tile_global_order_min_var_buffer(), + [&frag_meta_builder](auto size) { + return frag_meta_builder->initTileGlobalOrderMaxBuffer(size); + }, + [&frag_meta_builder](auto size) { + return frag_meta_builder->initTileGlobalOrderMaxVarBuffer(size); + }); + auto& tile_sums = frag_meta.loaded_metadata()->tile_sums(); if (!tile_sums.empty()) { auto builder = frag_meta_builder->initTileSums(tile_sums.size()); diff --git a/tiledb/sm/serialization/tiledb-rest.capnp b/tiledb/sm/serialization/tiledb-rest.capnp index a876c0e24a9..78ab7ce4e66 100644 --- a/tiledb/sm/serialization/tiledb-rest.capnp +++ b/tiledb/sm/serialization/tiledb-rest.capnp @@ -1093,6 +1093,10 @@ struct FragmentMetadata { # fragment min/max/sum/nullcount offsets processedConditionsOffsets @10 :UInt64; # processed conditions offsets + tileGlobalOrderMinOffsets @11 :List(UInt64); + # global order min tile offsets + tileGlobalOrderMaxOffsets @12 :List(UInt64); + # global order max tile offsets } fileSizes @0 :List(UInt64); @@ -1184,6 +1188,18 @@ struct FragmentMetadata { arraySchemaName @29 :Text; # array schema name + + tileGlobalOrderMinBuffer @30 :List(List(UInt8)); + # tile global order min buffers + + tileGlobalOrderMinVarBuffer @31 :List(List(UInt8)); + # tile global order min buffers for var length dimensions + + tileGlobalOrderMaxBuffer @32 :List(List(UInt8)); + # tile global order max buffers + + tileGlobalOrderMaxVarBuffer @33 :List(List(UInt8)); + # tile global order max buffers for var length dimensions } struct MultiPartUploadState { diff --git a/tiledb/sm/serialization/tiledb-rest.capnp.c++ b/tiledb/sm/serialization/tiledb-rest.capnp.c++ index 72f7285c9d2..c0829129620 100644 --- a/tiledb/sm/serialization/tiledb-rest.capnp.c++ +++ b/tiledb/sm/serialization/tiledb-rest.capnp.c++ @@ -8355,18 +8355,18 @@ const ::capnp::_::RawSchema s_d8bd3c0dec37b773 = { 0, 3, i_d8bd3c0dec37b773, nullptr, nullptr, { &s_d8bd3c0dec37b773, nullptr, nullptr, 0, 0, nullptr }, false }; #endif // !CAPNP_LITE -static const ::capnp::_::AlignedData<629> b_cde352fc27e7ca61 = { +static const ::capnp::_::AlignedData<733> b_cde352fc27e7ca61 = { { 0, 0, 0, 0, 6, 0, 6, 0, 97, 202, 231, 39, 252, 82, 227, 205, 18, 0, 0, 0, 1, 0, 4, 0, 127, 216, 135, 181, 36, 146, 125, 181, - 23, 0, 7, 0, 0, 0, 0, 0, + 27, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 13, 91, 0, 0, 66, 103, 0, 0, + 13, 91, 0, 0, 134, 105, 0, 0, 21, 0, 0, 0, 26, 1, 0, 0, 37, 0, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 53, 0, 0, 0, 151, 6, 0, 0, + 53, 0, 0, 0, 119, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 116, 105, 108, 101, 100, 98, 45, 114, @@ -8380,217 +8380,245 @@ static const ::capnp::_::AlignedData<629> b_cde352fc27e7ca61 = { 71, 101, 110, 101, 114, 105, 99, 84, 105, 108, 101, 79, 102, 102, 115, 101, 116, 115, 0, 0, 0, 0, 0, 0, - 120, 0, 0, 0, 3, 0, 4, 0, + 136, 0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 57, 3, 0, 0, 82, 0, 0, 0, + 169, 3, 0, 0, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 56, 3, 0, 0, 3, 0, 1, 0, - 84, 3, 0, 0, 2, 0, 1, 0, + 168, 3, 0, 0, 3, 0, 1, 0, + 196, 3, 0, 0, 2, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 81, 3, 0, 0, 106, 0, 0, 0, + 193, 3, 0, 0, 106, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 80, 3, 0, 0, 3, 0, 1, 0, - 108, 3, 0, 0, 2, 0, 1, 0, + 192, 3, 0, 0, 3, 0, 1, 0, + 220, 3, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 2, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 105, 3, 0, 0, 146, 0, 0, 0, + 217, 3, 0, 0, 146, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 108, 3, 0, 0, 3, 0, 1, 0, - 136, 3, 0, 0, 2, 0, 1, 0, + 220, 3, 0, 0, 3, 0, 1, 0, + 248, 3, 0, 0, 2, 0, 1, 0, 3, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 133, 3, 0, 0, 98, 0, 0, 0, + 245, 3, 0, 0, 98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 132, 3, 0, 0, 3, 0, 1, 0, - 144, 3, 0, 0, 2, 0, 1, 0, + 244, 3, 0, 0, 3, 0, 1, 0, + 0, 4, 0, 0, 2, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 141, 3, 0, 0, 114, 0, 0, 0, + 253, 3, 0, 0, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 140, 3, 0, 0, 3, 0, 1, 0, - 152, 3, 0, 0, 2, 0, 1, 0, + 252, 3, 0, 0, 3, 0, 1, 0, + 8, 4, 0, 0, 2, 0, 1, 0, 5, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 149, 3, 0, 0, 114, 0, 0, 0, + 5, 4, 0, 0, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 148, 3, 0, 0, 3, 0, 1, 0, - 160, 3, 0, 0, 2, 0, 1, 0, + 4, 4, 0, 0, 3, 0, 1, 0, + 16, 4, 0, 0, 2, 0, 1, 0, 6, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 157, 3, 0, 0, 114, 0, 0, 0, + 13, 4, 0, 0, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 156, 3, 0, 0, 3, 0, 1, 0, - 168, 3, 0, 0, 2, 0, 1, 0, + 12, 4, 0, 0, 3, 0, 1, 0, + 24, 4, 0, 0, 2, 0, 1, 0, 7, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 165, 3, 0, 0, 114, 0, 0, 0, + 21, 4, 0, 0, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 164, 3, 0, 0, 3, 0, 1, 0, - 176, 3, 0, 0, 2, 0, 1, 0, + 20, 4, 0, 0, 3, 0, 1, 0, + 32, 4, 0, 0, 2, 0, 1, 0, 8, 0, 0, 0, 4, 0, 0, 0, 0, 0, 1, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 173, 3, 0, 0, 98, 0, 0, 0, + 29, 4, 0, 0, 98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 172, 3, 0, 0, 3, 0, 1, 0, - 216, 3, 0, 0, 2, 0, 1, 0, + 28, 4, 0, 0, 3, 0, 1, 0, + 72, 4, 0, 0, 2, 0, 1, 0, 9, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 213, 3, 0, 0, 122, 0, 0, 0, + 69, 4, 0, 0, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 212, 3, 0, 0, 3, 0, 1, 0, - 0, 4, 0, 0, 2, 0, 1, 0, + 68, 4, 0, 0, 3, 0, 1, 0, + 112, 4, 0, 0, 2, 0, 1, 0, 10, 0, 0, 0, 6, 0, 0, 0, 0, 0, 1, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 253, 3, 0, 0, 106, 0, 0, 0, + 109, 4, 0, 0, 106, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 252, 3, 0, 0, 3, 0, 1, 0, - 40, 4, 0, 0, 2, 0, 1, 0, + 108, 4, 0, 0, 3, 0, 1, 0, + 152, 4, 0, 0, 2, 0, 1, 0, 11, 0, 0, 0, 7, 0, 0, 0, 0, 0, 1, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 37, 4, 0, 0, 162, 0, 0, 0, + 149, 4, 0, 0, 162, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 40, 4, 0, 0, 3, 0, 1, 0, - 84, 4, 0, 0, 2, 0, 1, 0, + 152, 4, 0, 0, 3, 0, 1, 0, + 196, 4, 0, 0, 2, 0, 1, 0, 12, 0, 0, 0, 8, 0, 0, 0, 0, 0, 1, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 81, 4, 0, 0, 114, 0, 0, 0, + 193, 4, 0, 0, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 80, 4, 0, 0, 3, 0, 1, 0, - 124, 4, 0, 0, 2, 0, 1, 0, + 192, 4, 0, 0, 3, 0, 1, 0, + 236, 4, 0, 0, 2, 0, 1, 0, 13, 0, 0, 0, 9, 0, 0, 0, 0, 0, 1, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 121, 4, 0, 0, 138, 0, 0, 0, + 233, 4, 0, 0, 138, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 124, 4, 0, 0, 3, 0, 1, 0, - 168, 4, 0, 0, 2, 0, 1, 0, + 236, 4, 0, 0, 3, 0, 1, 0, + 24, 5, 0, 0, 2, 0, 1, 0, 14, 0, 0, 0, 10, 0, 0, 0, 0, 0, 1, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 165, 4, 0, 0, 114, 0, 0, 0, + 21, 5, 0, 0, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 164, 4, 0, 0, 3, 0, 1, 0, - 208, 4, 0, 0, 2, 0, 1, 0, + 20, 5, 0, 0, 3, 0, 1, 0, + 64, 5, 0, 0, 2, 0, 1, 0, 15, 0, 0, 0, 11, 0, 0, 0, 0, 0, 1, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 205, 4, 0, 0, 138, 0, 0, 0, + 61, 5, 0, 0, 138, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, - 208, 4, 0, 0, 3, 0, 1, 0, - 252, 4, 0, 0, 2, 0, 1, 0, + 64, 5, 0, 0, 3, 0, 1, 0, + 108, 5, 0, 0, 2, 0, 1, 0, 16, 0, 0, 0, 12, 0, 0, 0, 0, 0, 1, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 249, 4, 0, 0, 74, 0, 0, 0, + 105, 5, 0, 0, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 248, 4, 0, 0, 3, 0, 1, 0, - 36, 5, 0, 0, 2, 0, 1, 0, + 104, 5, 0, 0, 3, 0, 1, 0, + 148, 5, 0, 0, 2, 0, 1, 0, 17, 0, 0, 0, 13, 0, 0, 0, 0, 0, 1, 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 33, 5, 0, 0, 122, 0, 0, 0, + 145, 5, 0, 0, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 32, 5, 0, 0, 3, 0, 1, 0, - 76, 5, 0, 0, 2, 0, 1, 0, + 144, 5, 0, 0, 3, 0, 1, 0, + 188, 5, 0, 0, 2, 0, 1, 0, 18, 0, 0, 0, 14, 0, 0, 0, 0, 0, 1, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 73, 5, 0, 0, 106, 0, 0, 0, + 185, 5, 0, 0, 106, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 72, 5, 0, 0, 3, 0, 1, 0, - 116, 5, 0, 0, 2, 0, 1, 0, + 184, 5, 0, 0, 3, 0, 1, 0, + 228, 5, 0, 0, 2, 0, 1, 0, 19, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 113, 5, 0, 0, 106, 0, 0, 0, + 225, 5, 0, 0, 106, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 112, 5, 0, 0, 3, 0, 1, 0, - 156, 5, 0, 0, 2, 0, 1, 0, + 224, 5, 0, 0, 3, 0, 1, 0, + 12, 6, 0, 0, 2, 0, 1, 0, 20, 0, 0, 0, 16, 0, 0, 0, 0, 0, 1, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 153, 5, 0, 0, 106, 0, 0, 0, + 9, 6, 0, 0, 106, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 152, 5, 0, 0, 3, 0, 1, 0, - 180, 5, 0, 0, 2, 0, 1, 0, + 8, 6, 0, 0, 3, 0, 1, 0, + 36, 6, 0, 0, 2, 0, 1, 0, 21, 0, 0, 0, 17, 0, 0, 0, 0, 0, 1, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 177, 5, 0, 0, 154, 0, 0, 0, + 33, 6, 0, 0, 154, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 180, 5, 0, 0, 3, 0, 1, 0, - 208, 5, 0, 0, 2, 0, 1, 0, + 36, 6, 0, 0, 3, 0, 1, 0, + 64, 6, 0, 0, 2, 0, 1, 0, 22, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 205, 5, 0, 0, 66, 0, 0, 0, + 61, 6, 0, 0, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 200, 5, 0, 0, 3, 0, 1, 0, - 212, 5, 0, 0, 2, 0, 1, 0, + 56, 6, 0, 0, 3, 0, 1, 0, + 68, 6, 0, 0, 2, 0, 1, 0, 23, 0, 0, 0, 18, 0, 0, 0, 0, 0, 1, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 209, 5, 0, 0, 122, 0, 0, 0, + 65, 6, 0, 0, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 208, 5, 0, 0, 3, 0, 1, 0, - 236, 5, 0, 0, 2, 0, 1, 0, + 64, 6, 0, 0, 3, 0, 1, 0, + 92, 6, 0, 0, 2, 0, 1, 0, 24, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 233, 5, 0, 0, 130, 0, 0, 0, + 89, 6, 0, 0, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 232, 5, 0, 0, 3, 0, 1, 0, - 244, 5, 0, 0, 2, 0, 1, 0, + 88, 6, 0, 0, 3, 0, 1, 0, + 100, 6, 0, 0, 2, 0, 1, 0, 25, 0, 0, 0, 19, 0, 0, 0, 0, 0, 1, 0, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 241, 5, 0, 0, 122, 0, 0, 0, + 97, 6, 0, 0, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 240, 5, 0, 0, 3, 0, 1, 0, - 252, 5, 0, 0, 2, 0, 1, 0, + 96, 6, 0, 0, 3, 0, 1, 0, + 108, 6, 0, 0, 2, 0, 1, 0, 26, 0, 0, 0, 20, 0, 0, 0, 0, 0, 1, 0, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 249, 5, 0, 0, 50, 0, 0, 0, + 105, 6, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 244, 5, 0, 0, 3, 0, 1, 0, - 0, 6, 0, 0, 2, 0, 1, 0, + 100, 6, 0, 0, 3, 0, 1, 0, + 112, 6, 0, 0, 2, 0, 1, 0, 27, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 253, 5, 0, 0, 178, 0, 0, 0, + 109, 6, 0, 0, 178, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 6, 0, 0, 3, 0, 1, 0, - 12, 6, 0, 0, 2, 0, 1, 0, + 112, 6, 0, 0, 3, 0, 1, 0, + 124, 6, 0, 0, 2, 0, 1, 0, 28, 0, 0, 0, 21, 0, 0, 0, 0, 0, 1, 0, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 9, 6, 0, 0, 82, 0, 0, 0, + 121, 6, 0, 0, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 8, 6, 0, 0, 3, 0, 1, 0, - 20, 6, 0, 0, 2, 0, 1, 0, + 
120, 6, 0, 0, 3, 0, 1, 0, + 132, 6, 0, 0, 2, 0, 1, 0, 29, 0, 0, 0, 22, 0, 0, 0, 0, 0, 1, 0, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 17, 6, 0, 0, 130, 0, 0, 0, + 129, 6, 0, 0, 130, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 128, 6, 0, 0, 3, 0, 1, 0, + 140, 6, 0, 0, 2, 0, 1, 0, + 30, 0, 0, 0, 23, 0, 0, 0, + 0, 0, 1, 0, 30, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 137, 6, 0, 0, 202, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 144, 6, 0, 0, 3, 0, 1, 0, + 188, 6, 0, 0, 2, 0, 1, 0, + 31, 0, 0, 0, 24, 0, 0, 0, + 0, 0, 1, 0, 31, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 185, 6, 0, 0, 226, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 192, 6, 0, 0, 3, 0, 1, 0, + 236, 6, 0, 0, 2, 0, 1, 0, + 32, 0, 0, 0, 25, 0, 0, 0, + 0, 0, 1, 0, 32, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 233, 6, 0, 0, 202, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 240, 6, 0, 0, 3, 0, 1, 0, + 28, 7, 0, 0, 2, 0, 1, 0, + 33, 0, 0, 0, 26, 0, 0, 0, + 0, 0, 1, 0, 33, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 25, 7, 0, 0, 226, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 16, 6, 0, 0, 3, 0, 1, 0, - 28, 6, 0, 0, 2, 0, 1, 0, + 32, 7, 0, 0, 3, 0, 1, 0, + 76, 7, 0, 0, 2, 0, 1, 0, 102, 105, 108, 101, 83, 105, 122, 101, 115, 0, 0, 0, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, @@ -8983,6 +9011,82 @@ static const ::capnp::_::AlignedData<629> b_cde352fc27e7ca61 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 116, 105, 108, 101, 71, 108, 111, 98, + 97, 108, 79, 114, 100, 101, 114, 77, + 105, 110, 66, 117, 102, 102, 101, 114, + 0, 0, 0, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 0, 1, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 0, 1, 0, + 6, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 116, 105, 108, 101, 71, 108, 111, 98, + 97, 108, 79, 114, 100, 101, 114, 77, + 105, 110, 86, 97, 114, 66, 117, 102, + 102, 101, 114, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 0, 1, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 0, 1, 0, + 6, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 116, 105, 108, 101, 71, 108, 111, 98, + 97, 108, 79, 114, 100, 101, 114, 77, + 97, 120, 66, 117, 102, 102, 101, 114, + 0, 0, 0, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 0, 1, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 0, 1, 0, + 6, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 116, 105, 108, 101, 71, 108, 111, 98, + 97, 108, 79, 114, 100, 101, 114, 77, + 97, 120, 86, 97, 114, 66, 117, 102, + 102, 101, 114, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 0, 1, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 0, 1, 0, + 6, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } 
}; @@ -8992,25 +9096,25 @@ static const ::capnp::_::RawSchema* const d_cde352fc27e7ca61[] = { &s_89aa8f4e88036b9e, &s_a18264549448ece3, }; -static const uint16_t m_cde352fc27e7ca61[] = {29, 0, 2, 1, 19, 18, 21, 20, 3, 28, 27, 5, 4, 24, 25, 26, 6, 7, 14, 15, 12, 13, 17, 8, 16, 11, 9, 10, 23, 22}; -static const uint16_t i_cde352fc27e7ca61[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29}; +static const uint16_t m_cde352fc27e7ca61[] = {29, 0, 2, 1, 19, 18, 21, 20, 3, 28, 27, 5, 4, 24, 25, 26, 6, 32, 33, 30, 31, 7, 14, 15, 12, 13, 17, 8, 16, 11, 9, 10, 23, 22}; +static const uint16_t i_cde352fc27e7ca61[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33}; const ::capnp::_::RawSchema s_cde352fc27e7ca61 = { - 0xcde352fc27e7ca61, b_cde352fc27e7ca61.words, 629, d_cde352fc27e7ca61, m_cde352fc27e7ca61, - 2, 30, i_cde352fc27e7ca61, nullptr, nullptr, { &s_cde352fc27e7ca61, nullptr, nullptr, 0, 0, nullptr }, false + 0xcde352fc27e7ca61, b_cde352fc27e7ca61.words, 733, d_cde352fc27e7ca61, m_cde352fc27e7ca61, + 2, 34, i_cde352fc27e7ca61, nullptr, nullptr, { &s_cde352fc27e7ca61, nullptr, nullptr, 0, 0, nullptr }, false }; #endif // !CAPNP_LITE -static const ::capnp::_::AlignedData<236> b_89aa8f4e88036b9e = { +static const ::capnp::_::AlignedData<280> b_89aa8f4e88036b9e = { { 0, 0, 0, 0, 6, 0, 6, 0, 158, 107, 3, 136, 78, 143, 170, 137, 35, 0, 0, 0, 1, 0, 3, 0, 97, 202, 231, 39, 252, 82, 227, 205, - 8, 0, 7, 0, 0, 0, 0, 0, + 10, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 41, 91, 0, 0, 102, 94, 0, 0, + 41, 91, 0, 0, 24, 95, 0, 0, 21, 0, 0, 0, 178, 1, 0, 0, 45, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 41, 0, 0, 0, 111, 2, 0, 0, + 41, 0, 0, 0, 223, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 116, 105, 108, 101, 100, 98, 45, 114, @@ -9021,84 +9125,98 @@ static const ::capnp::_::AlignedData<236> b_89aa8f4e88036b9e = { 105, 99, 84, 105, 108, 101, 79, 102, 102, 115, 101, 116, 115, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, - 44, 0, 0, 0, 3, 0, 4, 0, + 52, 0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 37, 1, 0, 0, 50, 0, 0, 0, + 93, 1, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 32, 1, 0, 0, 3, 0, 1, 0, - 44, 1, 0, 0, 2, 0, 1, 0, + 88, 1, 0, 0, 3, 0, 1, 0, + 100, 1, 0, 0, 2, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 41, 1, 0, 0, 98, 0, 0, 0, + 97, 1, 0, 0, 98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 40, 1, 0, 0, 3, 0, 1, 0, - 68, 1, 0, 0, 2, 0, 1, 0, + 96, 1, 0, 0, 3, 0, 1, 0, + 124, 1, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 65, 1, 0, 0, 122, 0, 0, 0, + 121, 1, 0, 0, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 64, 1, 0, 0, 3, 0, 1, 0, - 92, 1, 0, 0, 2, 0, 1, 0, + 120, 1, 0, 0, 3, 0, 1, 0, + 148, 1, 0, 0, 2, 0, 1, 0, 3, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 89, 1, 0, 0, 106, 0, 0, 0, + 145, 1, 0, 0, 106, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 88, 1, 0, 0, 3, 0, 1, 0, - 116, 1, 0, 0, 2, 0, 1, 0, + 144, 1, 0, 0, 3, 0, 1, 0, + 172, 1, 0, 0, 2, 0, 1, 0, 4, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 113, 1, 0, 0, 162, 0, 0, 0, + 169, 1, 0, 0, 162, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 116, 1, 0, 0, 3, 0, 1, 0, - 144, 1, 0, 0, 2, 0, 1, 0, + 172, 1, 0, 0, 3, 0, 1, 0, + 200, 1, 0, 0, 2, 0, 1, 0, 5, 0, 0, 0, 4, 0, 0, 0, 0, 0, 1, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 141, 1, 0, 0, 122, 
0, 0, 0, + 197, 1, 0, 0, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 140, 1, 0, 0, 3, 0, 1, 0, - 168, 1, 0, 0, 2, 0, 1, 0, + 196, 1, 0, 0, 3, 0, 1, 0, + 224, 1, 0, 0, 2, 0, 1, 0, 6, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 165, 1, 0, 0, 122, 0, 0, 0, + 221, 1, 0, 0, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 164, 1, 0, 0, 3, 0, 1, 0, - 192, 1, 0, 0, 2, 0, 1, 0, + 220, 1, 0, 0, 3, 0, 1, 0, + 248, 1, 0, 0, 2, 0, 1, 0, 7, 0, 0, 0, 6, 0, 0, 0, 0, 0, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 189, 1, 0, 0, 122, 0, 0, 0, + 245, 1, 0, 0, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 188, 1, 0, 0, 3, 0, 1, 0, - 216, 1, 0, 0, 2, 0, 1, 0, + 244, 1, 0, 0, 3, 0, 1, 0, + 16, 2, 0, 0, 2, 0, 1, 0, 8, 0, 0, 0, 7, 0, 0, 0, 0, 0, 1, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 213, 1, 0, 0, 170, 0, 0, 0, + 13, 2, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 216, 1, 0, 0, 3, 0, 1, 0, - 244, 1, 0, 0, 2, 0, 1, 0, + 16, 2, 0, 0, 3, 0, 1, 0, + 44, 2, 0, 0, 2, 0, 1, 0, 9, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 241, 1, 0, 0, 10, 1, 0, 0, + 41, 2, 0, 0, 10, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 252, 1, 0, 0, 3, 0, 1, 0, - 8, 2, 0, 0, 2, 0, 1, 0, + 52, 2, 0, 0, 3, 0, 1, 0, + 64, 2, 0, 0, 2, 0, 1, 0, 10, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 5, 2, 0, 0, 218, 0, 0, 0, + 61, 2, 0, 0, 218, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 68, 2, 0, 0, 3, 0, 1, 0, + 80, 2, 0, 0, 2, 0, 1, 0, + 11, 0, 0, 0, 8, 0, 0, 0, + 0, 0, 1, 0, 11, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 77, 2, 0, 0, 210, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 84, 2, 0, 0, 3, 0, 1, 0, + 112, 2, 0, 0, 2, 0, 1, 0, + 12, 0, 0, 0, 9, 0, 0, 0, + 0, 0, 1, 0, 12, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 109, 2, 0, 0, 210, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 12, 2, 0, 0, 3, 0, 1, 0, - 24, 2, 0, 0, 2, 0, 1, 0, + 116, 2, 0, 0, 3, 0, 1, 0, + 144, 2, 0, 0, 2, 0, 1, 0, 114, 116, 114, 101, 101, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9234,16 +9352,46 @@ static const ::capnp::_::AlignedData<236> b_89aa8f4e88036b9e = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 116, 105, 108, 101, 71, 108, 111, 98, + 97, 108, 79, 114, 100, 101, 114, 77, + 105, 110, 79, 102, 102, 115, 101, 116, + 115, 0, 0, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 0, 1, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 116, 105, 108, 101, 71, 108, 111, 98, + 97, 108, 79, 114, 100, 101, 114, 77, + 97, 120, 79, 102, 102, 115, 101, 116, + 115, 0, 0, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 0, 1, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }; ::capnp::word const* const bp_89aa8f4e88036b9e = b_89aa8f4e88036b9e.words; #if !CAPNP_LITE -static const uint16_t m_89aa8f4e88036b9e[] = {9, 10, 0, 6, 5, 8, 1, 7, 4, 2, 3}; -static const uint16_t i_89aa8f4e88036b9e[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; +static const uint16_t m_89aa8f4e88036b9e[] = {9, 10, 0, 12, 11, 6, 5, 8, 1, 7, 4, 2, 3}; +static const uint16_t i_89aa8f4e88036b9e[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; const ::capnp::_::RawSchema s_89aa8f4e88036b9e = { - 
0x89aa8f4e88036b9e, b_89aa8f4e88036b9e.words, 236, nullptr, m_89aa8f4e88036b9e, - 0, 11, i_89aa8f4e88036b9e, nullptr, nullptr, { &s_89aa8f4e88036b9e, nullptr, nullptr, 0, 0, nullptr }, false + 0x89aa8f4e88036b9e, b_89aa8f4e88036b9e.words, 280, nullptr, m_89aa8f4e88036b9e, + 0, 13, i_89aa8f4e88036b9e, nullptr, nullptr, { &s_89aa8f4e88036b9e, nullptr, nullptr, 0, 0, nullptr }, false }; #endif // !CAPNP_LITE static const ::capnp::_::AlignedData<107> b_d492b6734d5e3bf5 = { @@ -9253,7 +9401,7 @@ static const ::capnp::_::AlignedData<107> b_d492b6734d5e3bf5 = { 127, 216, 135, 181, 36, 146, 125, 181, 4, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 68, 103, 0, 0, 96, 106, 0, 0, + 136, 105, 0, 0, 164, 108, 0, 0, 21, 0, 0, 0, 58, 1, 0, 0, 37, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9375,7 +9523,7 @@ static const ::capnp::_::AlignedData<50> b_bde8ebd7b13d8625 = { 127, 216, 135, 181, 36, 146, 125, 181, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 97, 106, 0, 0, 240, 106, 0, 0, + 165, 108, 0, 0, 52, 109, 0, 0, 21, 0, 0, 0, 2, 1, 0, 0, 33, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9436,7 +9584,7 @@ static const ::capnp::_::AlignedData<55> b_a736c51d292ca752 = { 127, 216, 135, 181, 36, 146, 125, 181, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 242, 106, 0, 0, 67, 107, 0, 0, + 54, 109, 0, 0, 135, 109, 0, 0, 21, 0, 0, 0, 50, 1, 0, 0, 37, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9502,7 +9650,7 @@ static const ::capnp::_::AlignedData<49> b_cd8abc9dabc4b03f = { 127, 216, 135, 181, 36, 146, 125, 181, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 69, 107, 0, 0, 11, 108, 0, 0, + 137, 109, 0, 0, 79, 110, 0, 0, 21, 0, 0, 0, 2, 1, 0, 0, 33, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9562,7 +9710,7 @@ static const ::capnp::_::AlignedData<56> b_cfea684b4bcd0721 = { 127, 216, 135, 181, 36, 146, 125, 181, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 13, 108, 0, 0, 142, 108, 0, 0, + 81, 110, 0, 0, 210, 110, 0, 0, 21, 0, 0, 0, 146, 1, 0, 0, 45, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9632,7 +9780,7 @@ static const ::capnp::_::AlignedData<69> b_aaeeafe1e9f3ea1c = { 127, 216, 135, 181, 36, 146, 125, 181, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 144, 108, 0, 0, 94, 109, 0, 0, + 212, 110, 0, 0, 162, 111, 0, 0, 21, 0, 0, 0, 194, 1, 0, 0, 45, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9715,7 +9863,7 @@ static const ::capnp::_::AlignedData<56> b_f5a35661031194d2 = { 127, 216, 135, 181, 36, 146, 125, 181, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 96, 109, 0, 0, 226, 109, 0, 0, + 164, 111, 0, 0, 38, 112, 0, 0, 21, 0, 0, 0, 98, 1, 0, 0, 41, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9785,7 +9933,7 @@ static const ::capnp::_::AlignedData<35> b_e68edfc0939e63df = { 127, 216, 135, 181, 36, 146, 125, 181, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 228, 109, 0, 0, 33, 110, 0, 0, + 40, 112, 0, 0, 101, 112, 0, 0, 21, 0, 0, 0, 42, 1, 0, 0, 37, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9834,7 +9982,7 @@ static const ::capnp::_::AlignedData<56> b_891a70a671f15cf6 = { 127, 216, 135, 181, 36, 146, 125, 181, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 35, 110, 0, 0, 163, 110, 0, 0, + 103, 112, 0, 0, 231, 112, 0, 0, 21, 0, 0, 0, 82, 1, 0, 0, 41, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9904,7 +10052,7 @@ static const ::capnp::_::AlignedData<57> b_805c080c10c1e959 = { 127, 216, 135, 181, 36, 146, 125, 181, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 165, 110, 0, 0, 25, 112, 0, 0, + 233, 112, 0, 0, 93, 114, 0, 0, 21, 0, 0, 0, 
90, 1, 0, 0, 41, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -9976,7 +10124,7 @@ static const ::capnp::_::AlignedData<53> b_83f094010132ff21 = { 127, 216, 135, 181, 36, 146, 125, 181, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 27, 112, 0, 0, 99, 113, 0, 0, + 95, 114, 0, 0, 167, 115, 0, 0, 21, 0, 0, 0, 74, 1, 0, 0, 41, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10043,7 +10191,7 @@ static const ::capnp::_::AlignedData<70> b_ebe17f59ac9a1df1 = { 127, 216, 135, 181, 36, 146, 125, 181, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 101, 113, 0, 0, 8, 114, 0, 0, + 169, 115, 0, 0, 76, 116, 0, 0, 21, 0, 0, 0, 82, 1, 0, 0, 41, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10131,7 +10279,7 @@ static const ::capnp::_::AlignedData<50> b_e06f571aa93eb314 = { 127, 216, 135, 181, 36, 146, 125, 181, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 10, 114, 0, 0, 133, 114, 0, 0, + 78, 116, 0, 0, 201, 116, 0, 0, 21, 0, 0, 0, 26, 1, 0, 0, 37, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10196,7 +10344,7 @@ static const ::capnp::_::AlignedData<108> b_9fd8fc2f462b2d06 = { 127, 216, 135, 181, 36, 146, 125, 181, 5, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 135, 114, 0, 0, 196, 115, 0, 0, + 203, 116, 0, 0, 8, 118, 0, 0, 21, 0, 0, 0, 34, 1, 0, 0, 37, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10315,7 +10463,7 @@ static const ::capnp::_::AlignedData<52> b_8965edf5597ce627 = { 127, 216, 135, 181, 36, 146, 125, 181, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 198, 115, 0, 0, 63, 116, 0, 0, + 10, 118, 0, 0, 131, 118, 0, 0, 21, 0, 0, 0, 90, 1, 0, 0, 41, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10381,7 +10529,7 @@ static const ::capnp::_::AlignedData<46> b_aac8bf9b5211388b = { 127, 216, 135, 181, 36, 146, 125, 181, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 65, 116, 0, 0, 198, 116, 0, 0, + 133, 118, 0, 0, 10, 119, 0, 0, 21, 0, 0, 0, 98, 1, 0, 0, 41, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10438,7 +10586,7 @@ static const ::capnp::_::AlignedData<54> b_ca2d4d0bfe4ae5d9 = { 127, 216, 135, 181, 36, 146, 125, 181, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 200, 116, 0, 0, 138, 118, 0, 0, + 12, 119, 0, 0, 206, 120, 0, 0, 21, 0, 0, 0, 250, 0, 0, 0, 33, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10506,7 +10654,7 @@ static const ::capnp::_::AlignedData<66> b_e193f1f45a9f102e = { 127, 216, 135, 181, 36, 146, 125, 181, 3, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 140, 118, 0, 0, 244, 119, 0, 0, + 208, 120, 0, 0, 56, 122, 0, 0, 21, 0, 0, 0, 226, 0, 0, 0, 33, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10583,7 +10731,7 @@ static const ::capnp::_::AlignedData<82> b_fafb9c94c6b54ec9 = { 127, 216, 135, 181, 36, 146, 125, 181, 2, 0, 7, 0, 0, 0, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, - 246, 119, 0, 0, 66, 122, 0, 0, + 58, 122, 0, 0, 134, 124, 0, 0, 21, 0, 0, 0, 2, 1, 0, 0, 33, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10679,7 +10827,7 @@ static const ::capnp::_::AlignedData<39> b_ce64eabcdabb02b5 = { 127, 216, 135, 181, 36, 146, 125, 181, 1, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 68, 122, 0, 0, 56, 123, 0, 0, + 136, 124, 0, 0, 124, 125, 0, 0, 21, 0, 0, 0, 242, 0, 0, 0, 33, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10732,7 +10880,7 @@ static const ::capnp::_::AlignedData<27> b_f3bb391da5271019 = { 127, 216, 135, 181, 36, 146, 125, 181, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 58, 123, 0, 0, 99, 123, 0, 0, + 126, 125, 0, 0, 167, 125, 0, 0, 21, 0, 0, 0, 234, 0, 0, 0, 33, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10770,7 
+10918,7 @@ static const ::capnp::_::AlignedData<36> b_87f0466598bb29be = { 127, 216, 135, 181, 36, 146, 125, 181, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 101, 123, 0, 0, 248, 123, 0, 0, + 169, 125, 0, 0, 60, 126, 0, 0, 21, 0, 0, 0, 42, 1, 0, 0, 37, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10820,7 +10968,7 @@ static const ::capnp::_::AlignedData<64> b_ff44627d34d063ab = { 127, 216, 135, 181, 36, 146, 125, 181, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 250, 123, 0, 0, 84, 124, 0, 0, + 62, 126, 0, 0, 152, 126, 0, 0, 21, 0, 0, 0, 2, 1, 0, 0, 33, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -10895,7 +11043,7 @@ static const ::capnp::_::AlignedData<56> b_8de04c65deeb1510 = { 127, 216, 135, 181, 36, 146, 125, 181, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 86, 124, 0, 0, 217, 124, 0, 0, + 154, 126, 0, 0, 29, 127, 0, 0, 21, 0, 0, 0, 26, 1, 0, 0, 37, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, diff --git a/tiledb/sm/serialization/tiledb-rest.capnp.h b/tiledb/sm/serialization/tiledb-rest.capnp.h index 5ebc892c812..dcd0ec42d92 100644 --- a/tiledb/sm/serialization/tiledb-rest.capnp.h +++ b/tiledb/sm/serialization/tiledb-rest.capnp.h @@ -1509,7 +1509,7 @@ struct FragmentMetadata { struct GenericTileOffsets; struct _capnpPrivate { - CAPNP_DECLARE_STRUCT_HEADER(cde352fc27e7ca61, 4, 23) + CAPNP_DECLARE_STRUCT_HEADER(cde352fc27e7ca61, 4, 27) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; @@ -1526,7 +1526,7 @@ struct FragmentMetadata::GenericTileOffsets { class Pipeline; struct _capnpPrivate { - CAPNP_DECLARE_STRUCT_HEADER(89aa8f4e88036b9e, 3, 8) + CAPNP_DECLARE_STRUCT_HEADER(89aa8f4e88036b9e, 3, 10) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; @@ -13275,6 +13275,30 @@ class FragmentMetadata::Reader { inline bool hasArraySchemaName() const; inline ::capnp::Text::Reader getArraySchemaName() const; + inline bool hasTileGlobalOrderMinBuffer() const; + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader + getTileGlobalOrderMinBuffer() const; + + inline bool hasTileGlobalOrderMinVarBuffer() const; + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader + getTileGlobalOrderMinVarBuffer() const; + + inline bool hasTileGlobalOrderMaxBuffer() const; + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader + getTileGlobalOrderMaxBuffer() const; + + inline bool hasTileGlobalOrderMaxVarBuffer() const; + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader + getTileGlobalOrderMaxVarBuffer() const; + private: ::capnp::_::StructReader _reader; template @@ -13781,6 +13805,110 @@ class FragmentMetadata::Builder { inline void adoptArraySchemaName(::capnp::Orphan<::capnp::Text>&& value); inline ::capnp::Orphan<::capnp::Text> disownArraySchemaName(); + inline bool hasTileGlobalOrderMinBuffer(); + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder + getTileGlobalOrderMinBuffer(); + inline void setTileGlobalOrderMinBuffer( + ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader value); + inline void setTileGlobalOrderMinBuffer( + ::kj::ArrayPtr< + const ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>::Reader> + value); + inline ::capnp::List< + 
::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder + initTileGlobalOrderMinBuffer(unsigned int size); + inline void adoptTileGlobalOrderMinBuffer( + ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>&& value); + inline ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>> + disownTileGlobalOrderMinBuffer(); + + inline bool hasTileGlobalOrderMinVarBuffer(); + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder + getTileGlobalOrderMinVarBuffer(); + inline void setTileGlobalOrderMinVarBuffer( + ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader value); + inline void setTileGlobalOrderMinVarBuffer( + ::kj::ArrayPtr< + const ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>::Reader> + value); + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder + initTileGlobalOrderMinVarBuffer(unsigned int size); + inline void adoptTileGlobalOrderMinVarBuffer( + ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>&& value); + inline ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>> + disownTileGlobalOrderMinVarBuffer(); + + inline bool hasTileGlobalOrderMaxBuffer(); + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder + getTileGlobalOrderMaxBuffer(); + inline void setTileGlobalOrderMaxBuffer( + ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader value); + inline void setTileGlobalOrderMaxBuffer( + ::kj::ArrayPtr< + const ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>::Reader> + value); + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder + initTileGlobalOrderMaxBuffer(unsigned int size); + inline void adoptTileGlobalOrderMaxBuffer( + ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>&& value); + inline ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>> + disownTileGlobalOrderMaxBuffer(); + + inline bool hasTileGlobalOrderMaxVarBuffer(); + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder + getTileGlobalOrderMaxVarBuffer(); + inline void setTileGlobalOrderMaxVarBuffer( + ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader value); + inline void setTileGlobalOrderMaxVarBuffer( + ::kj::ArrayPtr< + const ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>::Reader> + value); + inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder + initTileGlobalOrderMaxVarBuffer(unsigned int size); + inline void adoptTileGlobalOrderMaxVarBuffer( + ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>&& value); + inline ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>> + disownTileGlobalOrderMaxVarBuffer(); + private: ::capnp::_::StructBuilder _builder; template @@ -13873,6 +14001,14 @@ class FragmentMetadata::GenericTileOffsets::Reader { inline ::uint64_t 
getProcessedConditionsOffsets() const; + inline bool hasTileGlobalOrderMinOffsets() const; + inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Reader + getTileGlobalOrderMinOffsets() const; + + inline bool hasTileGlobalOrderMaxOffsets() const; + inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Reader + getTileGlobalOrderMaxOffsets() const; + private: ::capnp::_::StructReader _reader; template @@ -14033,6 +14169,36 @@ class FragmentMetadata::GenericTileOffsets::Builder { inline ::uint64_t getProcessedConditionsOffsets(); inline void setProcessedConditionsOffsets(::uint64_t value); + inline bool hasTileGlobalOrderMinOffsets(); + inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Builder + getTileGlobalOrderMinOffsets(); + inline void setTileGlobalOrderMinOffsets( + ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Reader value); + inline void setTileGlobalOrderMinOffsets( + ::kj::ArrayPtr value); + inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Builder + initTileGlobalOrderMinOffsets(unsigned int size); + inline void adoptTileGlobalOrderMinOffsets( + ::capnp::Orphan<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>&& + value); + inline ::capnp::Orphan<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>> + disownTileGlobalOrderMinOffsets(); + + inline bool hasTileGlobalOrderMaxOffsets(); + inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Builder + getTileGlobalOrderMaxOffsets(); + inline void setTileGlobalOrderMaxOffsets( + ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Reader value); + inline void setTileGlobalOrderMaxOffsets( + ::kj::ArrayPtr value); + inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Builder + initTileGlobalOrderMaxOffsets(unsigned int size); + inline void adoptTileGlobalOrderMaxOffsets( + ::capnp::Orphan<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>&& + value); + inline ::capnp::Orphan<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>> + disownTileGlobalOrderMaxOffsets(); + private: ::capnp::_::StructBuilder _builder; template @@ -32463,6 +32629,342 @@ FragmentMetadata::Builder::disownArraySchemaName() { _builder.getPointerField(::capnp::bounded<22>() * ::capnp::POINTERS)); } +inline bool FragmentMetadata::Reader::hasTileGlobalOrderMinBuffer() const { + return !_reader.getPointerField(::capnp::bounded<23>() * ::capnp::POINTERS) + .isNull(); +} +inline bool FragmentMetadata::Builder::hasTileGlobalOrderMinBuffer() { + return !_builder.getPointerField(::capnp::bounded<23>() * ::capnp::POINTERS) + .isNull(); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader +FragmentMetadata::Reader::getTileGlobalOrderMinBuffer() const { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::get(_reader + .getPointerField( + ::capnp::bounded<23>() * + ::capnp::POINTERS)); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder +FragmentMetadata::Builder::getTileGlobalOrderMinBuffer() { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::get(_builder + .getPointerField( + ::capnp::bounded<23>() * + ::capnp::POINTERS)); +} +inline void FragmentMetadata::Builder::setTileGlobalOrderMinBuffer( + ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader value) { + ::capnp::_::PointerHelpers<::capnp::List< + 
::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + set(_builder.getPointerField(::capnp::bounded<23>() * ::capnp::POINTERS), + value); +} +inline void FragmentMetadata::Builder::setTileGlobalOrderMinBuffer( + ::kj::ArrayPtr< + const ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>::Reader> + value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + set(_builder.getPointerField(::capnp::bounded<23>() * ::capnp::POINTERS), + value); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder +FragmentMetadata::Builder::initTileGlobalOrderMinBuffer(unsigned int size) { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + init( + _builder.getPointerField(::capnp::bounded<23>() * ::capnp::POINTERS), + size); +} +inline void FragmentMetadata::Builder::adoptTileGlobalOrderMinBuffer( + ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>&& value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + adopt( + _builder.getPointerField(::capnp::bounded<23>() * ::capnp::POINTERS), + kj::mv(value)); +} +inline ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>> +FragmentMetadata::Builder::disownTileGlobalOrderMinBuffer() { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::disown(_builder + .getPointerField( + ::capnp::bounded<23>() * + ::capnp::POINTERS)); +} + +inline bool FragmentMetadata::Reader::hasTileGlobalOrderMinVarBuffer() const { + return !_reader.getPointerField(::capnp::bounded<24>() * ::capnp::POINTERS) + .isNull(); +} +inline bool FragmentMetadata::Builder::hasTileGlobalOrderMinVarBuffer() { + return !_builder.getPointerField(::capnp::bounded<24>() * ::capnp::POINTERS) + .isNull(); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader +FragmentMetadata::Reader::getTileGlobalOrderMinVarBuffer() const { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::get(_reader + .getPointerField( + ::capnp::bounded<24>() * + ::capnp::POINTERS)); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder +FragmentMetadata::Builder::getTileGlobalOrderMinVarBuffer() { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::get(_builder + .getPointerField( + ::capnp::bounded<24>() * + ::capnp::POINTERS)); +} +inline void FragmentMetadata::Builder::setTileGlobalOrderMinVarBuffer( + ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + set(_builder.getPointerField(::capnp::bounded<24>() * ::capnp::POINTERS), + value); +} +inline void FragmentMetadata::Builder::setTileGlobalOrderMinVarBuffer( + ::kj::ArrayPtr< + const ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>::Reader> + value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, 
::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + set(_builder.getPointerField(::capnp::bounded<24>() * ::capnp::POINTERS), + value); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder +FragmentMetadata::Builder::initTileGlobalOrderMinVarBuffer(unsigned int size) { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + init( + _builder.getPointerField(::capnp::bounded<24>() * ::capnp::POINTERS), + size); +} +inline void FragmentMetadata::Builder::adoptTileGlobalOrderMinVarBuffer( + ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>&& value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + adopt( + _builder.getPointerField(::capnp::bounded<24>() * ::capnp::POINTERS), + kj::mv(value)); +} +inline ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>> +FragmentMetadata::Builder::disownTileGlobalOrderMinVarBuffer() { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::disown(_builder + .getPointerField( + ::capnp::bounded<24>() * + ::capnp::POINTERS)); +} + +inline bool FragmentMetadata::Reader::hasTileGlobalOrderMaxBuffer() const { + return !_reader.getPointerField(::capnp::bounded<25>() * ::capnp::POINTERS) + .isNull(); +} +inline bool FragmentMetadata::Builder::hasTileGlobalOrderMaxBuffer() { + return !_builder.getPointerField(::capnp::bounded<25>() * ::capnp::POINTERS) + .isNull(); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader +FragmentMetadata::Reader::getTileGlobalOrderMaxBuffer() const { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::get(_reader + .getPointerField( + ::capnp::bounded<25>() * + ::capnp::POINTERS)); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder +FragmentMetadata::Builder::getTileGlobalOrderMaxBuffer() { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::get(_builder + .getPointerField( + ::capnp::bounded<25>() * + ::capnp::POINTERS)); +} +inline void FragmentMetadata::Builder::setTileGlobalOrderMaxBuffer( + ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + set(_builder.getPointerField(::capnp::bounded<25>() * ::capnp::POINTERS), + value); +} +inline void FragmentMetadata::Builder::setTileGlobalOrderMaxBuffer( + ::kj::ArrayPtr< + const ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>::Reader> + value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + set(_builder.getPointerField(::capnp::bounded<25>() * ::capnp::POINTERS), + value); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder +FragmentMetadata::Builder::initTileGlobalOrderMaxBuffer(unsigned int size) { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, 
::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + init( + _builder.getPointerField(::capnp::bounded<25>() * ::capnp::POINTERS), + size); +} +inline void FragmentMetadata::Builder::adoptTileGlobalOrderMaxBuffer( + ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>&& value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + adopt( + _builder.getPointerField(::capnp::bounded<25>() * ::capnp::POINTERS), + kj::mv(value)); +} +inline ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>> +FragmentMetadata::Builder::disownTileGlobalOrderMaxBuffer() { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::disown(_builder + .getPointerField( + ::capnp::bounded<25>() * + ::capnp::POINTERS)); +} + +inline bool FragmentMetadata::Reader::hasTileGlobalOrderMaxVarBuffer() const { + return !_reader.getPointerField(::capnp::bounded<26>() * ::capnp::POINTERS) + .isNull(); +} +inline bool FragmentMetadata::Builder::hasTileGlobalOrderMaxVarBuffer() { + return !_builder.getPointerField(::capnp::bounded<26>() * ::capnp::POINTERS) + .isNull(); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader +FragmentMetadata::Reader::getTileGlobalOrderMaxVarBuffer() const { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::get(_reader + .getPointerField( + ::capnp::bounded<26>() * + ::capnp::POINTERS)); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder +FragmentMetadata::Builder::getTileGlobalOrderMaxVarBuffer() { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::get(_builder + .getPointerField( + ::capnp::bounded<26>() * + ::capnp::POINTERS)); +} +inline void FragmentMetadata::Builder::setTileGlobalOrderMaxVarBuffer( + ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Reader value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + set(_builder.getPointerField(::capnp::bounded<26>() * ::capnp::POINTERS), + value); +} +inline void FragmentMetadata::Builder::setTileGlobalOrderMaxVarBuffer( + ::kj::ArrayPtr< + const ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>::Reader> + value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + set(_builder.getPointerField(::capnp::bounded<26>() * ::capnp::POINTERS), + value); +} +inline ::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>::Builder +FragmentMetadata::Builder::initTileGlobalOrderMaxVarBuffer(unsigned int size) { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + init( + _builder.getPointerField(::capnp::bounded<26>() * ::capnp::POINTERS), + size); +} +inline void FragmentMetadata::Builder::adoptTileGlobalOrderMaxVarBuffer( + ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>&& value) { + ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, 
::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>:: + adopt( + _builder.getPointerField(::capnp::bounded<26>() * ::capnp::POINTERS), + kj::mv(value)); +} +inline ::capnp::Orphan<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>> +FragmentMetadata::Builder::disownTileGlobalOrderMaxVarBuffer() { + return ::capnp::_::PointerHelpers<::capnp::List< + ::capnp::List<::uint8_t, ::capnp::Kind::PRIMITIVE>, + ::capnp::Kind::LIST>>::disown(_builder + .getPointerField( + ::capnp::bounded<26>() * + ::capnp::POINTERS)); +} + inline ::uint64_t FragmentMetadata::GenericTileOffsets::Reader::getRtree() const { return _reader.getDataField<::uint64_t>( @@ -33003,6 +33505,140 @@ FragmentMetadata::GenericTileOffsets::Builder::setProcessedConditionsOffsets( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } +inline bool +FragmentMetadata::GenericTileOffsets::Reader::hasTileGlobalOrderMinOffsets() + const { + return !_reader.getPointerField(::capnp::bounded<8>() * ::capnp::POINTERS) + .isNull(); +} +inline bool +FragmentMetadata::GenericTileOffsets::Builder::hasTileGlobalOrderMinOffsets() { + return !_builder.getPointerField(::capnp::bounded<8>() * ::capnp::POINTERS) + .isNull(); +} +inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Reader +FragmentMetadata::GenericTileOffsets::Reader::getTileGlobalOrderMinOffsets() + const { + return ::capnp::_:: + PointerHelpers<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>::get( + _reader.getPointerField(::capnp::bounded<8>() * ::capnp::POINTERS)); +} +inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Builder +FragmentMetadata::GenericTileOffsets::Builder::getTileGlobalOrderMinOffsets() { + return ::capnp::_:: + PointerHelpers<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>::get( + _builder.getPointerField(::capnp::bounded<8>() * ::capnp::POINTERS)); +} +inline void +FragmentMetadata::GenericTileOffsets::Builder::setTileGlobalOrderMinOffsets( + ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Reader value) { + ::capnp::_:: + PointerHelpers<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>::set( + _builder.getPointerField(::capnp::bounded<8>() * ::capnp::POINTERS), + value); +} +inline void +FragmentMetadata::GenericTileOffsets::Builder::setTileGlobalOrderMinOffsets( + ::kj::ArrayPtr value) { + ::capnp::_:: + PointerHelpers<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>::set( + _builder.getPointerField(::capnp::bounded<8>() * ::capnp::POINTERS), + value); +} +inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Builder +FragmentMetadata::GenericTileOffsets::Builder::initTileGlobalOrderMinOffsets( + unsigned int size) { + return ::capnp::_:: + PointerHelpers<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>::init( + _builder.getPointerField(::capnp::bounded<8>() * ::capnp::POINTERS), + size); +} +inline void +FragmentMetadata::GenericTileOffsets::Builder::adoptTileGlobalOrderMinOffsets( + ::capnp::Orphan<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>&& + value) { + ::capnp::_::PointerHelpers< + ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>:: + adopt( + _builder.getPointerField(::capnp::bounded<8>() * ::capnp::POINTERS), + kj::mv(value)); +} +inline ::capnp::Orphan<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>> +FragmentMetadata::GenericTileOffsets::Builder:: + disownTileGlobalOrderMinOffsets() { + return ::capnp::_::PointerHelpers< + ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>:: + disown( + _builder.getPointerField(::capnp::bounded<8>() * 
::capnp::POINTERS)); +} + +inline bool +FragmentMetadata::GenericTileOffsets::Reader::hasTileGlobalOrderMaxOffsets() + const { + return !_reader.getPointerField(::capnp::bounded<9>() * ::capnp::POINTERS) + .isNull(); +} +inline bool +FragmentMetadata::GenericTileOffsets::Builder::hasTileGlobalOrderMaxOffsets() { + return !_builder.getPointerField(::capnp::bounded<9>() * ::capnp::POINTERS) + .isNull(); +} +inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Reader +FragmentMetadata::GenericTileOffsets::Reader::getTileGlobalOrderMaxOffsets() + const { + return ::capnp::_:: + PointerHelpers<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>::get( + _reader.getPointerField(::capnp::bounded<9>() * ::capnp::POINTERS)); +} +inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Builder +FragmentMetadata::GenericTileOffsets::Builder::getTileGlobalOrderMaxOffsets() { + return ::capnp::_:: + PointerHelpers<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>::get( + _builder.getPointerField(::capnp::bounded<9>() * ::capnp::POINTERS)); +} +inline void +FragmentMetadata::GenericTileOffsets::Builder::setTileGlobalOrderMaxOffsets( + ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Reader value) { + ::capnp::_:: + PointerHelpers<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>::set( + _builder.getPointerField(::capnp::bounded<9>() * ::capnp::POINTERS), + value); +} +inline void +FragmentMetadata::GenericTileOffsets::Builder::setTileGlobalOrderMaxOffsets( + ::kj::ArrayPtr value) { + ::capnp::_:: + PointerHelpers<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>::set( + _builder.getPointerField(::capnp::bounded<9>() * ::capnp::POINTERS), + value); +} +inline ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>::Builder +FragmentMetadata::GenericTileOffsets::Builder::initTileGlobalOrderMaxOffsets( + unsigned int size) { + return ::capnp::_:: + PointerHelpers<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>::init( + _builder.getPointerField(::capnp::bounded<9>() * ::capnp::POINTERS), + size); +} +inline void +FragmentMetadata::GenericTileOffsets::Builder::adoptTileGlobalOrderMaxOffsets( + ::capnp::Orphan<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>&& + value) { + ::capnp::_::PointerHelpers< + ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>:: + adopt( + _builder.getPointerField(::capnp::bounded<9>() * ::capnp::POINTERS), + kj::mv(value)); +} +inline ::capnp::Orphan<::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>> +FragmentMetadata::GenericTileOffsets::Builder:: + disownTileGlobalOrderMaxOffsets() { + return ::capnp::_::PointerHelpers< + ::capnp::List<::uint64_t, ::capnp::Kind::PRIMITIVE>>:: + disown( + _builder.getPointerField(::capnp::bounded<9>() * ::capnp::POINTERS)); +} + inline ::uint64_t MultiPartUploadState::Reader::getPartNumber() const { return _reader.getDataField<::uint64_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); diff --git a/tiledb/sm/tile/tile_metadata_generator.cc b/tiledb/sm/tile/tile_metadata_generator.cc index 291f3174e25..f2acb4a3440 100644 --- a/tiledb/sm/tile/tile_metadata_generator.cc +++ b/tiledb/sm/tile/tile_metadata_generator.cc @@ -275,12 +275,17 @@ TileMetadataGenerator::TileMetadataGenerator( const bool var_size, const uint64_t cell_size, const uint64_t cell_val_num) - : var_size_(var_size) + : is_dim_(is_dim) + , var_size_(var_size) , type_(type) , min_(nullptr) , min_size_(0) , max_(nullptr) , max_size_(0) + , global_order_min_(nullptr) + , global_order_min_size_(0) + , global_order_max_(nullptr) + , global_order_max_size_(0) , 
sum_(sizeof(uint64_t)) , null_count_(0) , cell_size_(cell_size) @@ -375,7 +380,17 @@ void TileMetadataGenerator::process_cell_slab( } void TileMetadataGenerator::set_tile_metadata(WriterTileTuple& tile) { - tile.set_metadata(min_, min_size_, max_, max_size_, sum_, null_count_); + tile.set_metadata( + min_, + min_size_, + max_, + max_size_, + global_order_min_, + global_order_min_size_, + global_order_max_, + global_order_max_size_, + sum_, + null_count_); } /* ****************************** */ @@ -499,6 +514,13 @@ void TileMetadataGenerator::process_cell_range( min_max(fixed_tile, start, end); } + if (is_dim_) { + iassert(end > start); + global_order_min_ = &tile.fixed_tile().data_as()[start]; + global_order_max_ = &tile.fixed_tile().data_as()[end - 1]; + global_order_min_size_ = global_order_max_size_ = sizeof(T); + } + if (has_sum_) { Sum::sum_type>::sum( fixed_tile, start, end, sum_); @@ -517,6 +539,23 @@ void TileMetadataGenerator::process_cell_range( } } + if (is_dim_) { + /* + uint64_t* max_sizes = reinterpret_cast( + loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data()); + + const uint64_t fixed_offset = tile / sizeof(uint64_t); + max_sizes[fixed_offset] = + data.var_tile().size() - source_offsets[data.cell_num() - 1]; + if (data.cell_num() == 1) { + min_sizes[fixed_offset] = max_sizes[fixed_offset]; + } else { + min_sizes[fixed_offset] = source_offsets[1] - source_offsets[0]; + } + */ + throw std::logic_error("TODO"); + } + if (has_sum_) { Sum::sum_type>::sum_nullable( fixed_tile, validity_tile, start, end, sum_); diff --git a/tiledb/sm/tile/tile_metadata_generator.h b/tiledb/sm/tile/tile_metadata_generator.h index 71ac7575b27..8ec33ac939e 100644 --- a/tiledb/sm/tile/tile_metadata_generator.h +++ b/tiledb/sm/tile/tile_metadata_generator.h @@ -239,6 +239,9 @@ class TileMetadataGenerator { /* PRIVATE ATTRIBUTES */ /* ********************************* */ + /** Is this tile for a dimension. */ + bool is_dim_; + /** Is this a var tile. */ bool var_size_; @@ -257,6 +260,20 @@ class TileMetadataGenerator { /** Maximum value size for this tile. */ uint64_t max_size_; + /** Value for this dimension of the tile's global order minimum value */ + const void* global_order_min_; + + /** Size of the value for this dimension of the tile's global order minimum + * value */ + uint64_t global_order_min_size_; + + /** Value for this dimension of the tile's global order maximum value */ + const void* global_order_max_; + + /** Size of the value for this dimension of the tile's global order maximum + * value */ + uint64_t global_order_max_size_; + /** Sum of values. 
*/ ByteVec sum_; diff --git a/tiledb/sm/tile/writer_tile_tuple.cc b/tiledb/sm/tile/writer_tile_tuple.cc index 9ce07d20f95..a84b50591a8 100644 --- a/tiledb/sm/tile/writer_tile_tuple.cc +++ b/tiledb/sm/tile/writer_tile_tuple.cc @@ -92,6 +92,10 @@ void WriterTileTuple::set_metadata( const uint64_t min_size, const void* max, const uint64_t max_size, + const void* global_order_min, + const uint64_t global_order_min_size, + const void* global_order_max, + const uint64_t global_order_max_size, const ByteVec& sum, const uint64_t null_count) { min_.resize(min_size); @@ -106,6 +110,16 @@ void WriterTileTuple::set_metadata( memcpy(max_.data(), max, max_size); } + if (global_order_min != nullptr) { + global_order_min_.emplace(global_order_min_size); + memcpy(global_order_min_->data(), global_order_min, global_order_min_size); + } + + if (global_order_max != nullptr) { + global_order_max_.emplace(global_order_max_size); + memcpy(global_order_max_->data(), global_order_max, global_order_max_size); + } + sum_ = sum; null_count_ = null_count; diff --git a/tiledb/sm/tile/writer_tile_tuple.h b/tiledb/sm/tile/writer_tile_tuple.h index 8a2ca28938b..5762254551e 100644 --- a/tiledb/sm/tile/writer_tile_tuple.h +++ b/tiledb/sm/tile/writer_tile_tuple.h @@ -148,6 +148,22 @@ class WriterTileTuple { return max_; } + /** + * @return if this is a dimension tile, the value of this dimension for the + * global order minimum coordinate + */ + inline const std::optional& global_order_min() const { + return global_order_min_; + } + + /** + * @return if this is a dimension tile, the value of this dimension for the + * global order maximum coordinate + */ + inline const std::optional& global_order_max() const { + return global_order_max_; + } + /** * Returns the tile null count. * @@ -173,6 +189,12 @@ class WriterTileTuple { * @param min_size Minimum size. * @param max Maximum. * @param max_size Maxmum size. + * @param global_order_min The value of this dimension for the tile's global + * order minimum coordinate + * @param global_order_min_size The size of the above + * @param global_order_max The value of this dimension for the tile's global + * order maximum coordinate + * @param global_order_max_size The size of the above * @param sum Sum. * @param null_count Null count. */ @@ -181,6 +203,10 @@ class WriterTileTuple { const uint64_t min_size, const void* max, const uint64_t max_size, + const void* global_order_min, + const uint64_t global_order_min_size, + const void* global_order_max, + const uint64_t global_order_max_size, const ByteVec& sum, const uint64_t null_count); @@ -250,6 +276,14 @@ class WriterTileTuple { /** Maximum value size for this tile. */ uint64_t max_size_; + /** Value of this dimension in the global order minimum coordinate of this + * tile */ + std::optional global_order_min_; + + /** Value of this dimension in the global order maximum coordinate of this + * tile */ + std::optional global_order_max_; + /** Sum of values. 
*/ ByteVec sum_; From 273a0bbd3e83488de66830b9a51aa29a1bc19aa0 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 5 Aug 2025 14:36:02 -0400 Subject: [PATCH 06/53] Fix test with serialization on --- test/src/unit-capi-fragment_info.cc | 5 ++++- tiledb/sm/fragment/fragment_info.cc | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/test/src/unit-capi-fragment_info.cc b/test/src/unit-capi-fragment_info.cc index a5a378df446..96213bb6177 100644 --- a/test/src/unit-capi-fragment_info.cc +++ b/test/src/unit-capi-fragment_info.cc @@ -720,13 +720,14 @@ TEST_CASE("C API: Test MBR fragment info", "[capi][fragment_info][mbr]") { // Load fragment info rc = tiledb_fragment_info_load(ctx, fragment_info); CHECK(rc == TILEDB_OK); - tiledb_config_free(&cfg); tiledb_fragment_info_t* deserialized_fragment_info = nullptr; if (serialized_load) { rc = tiledb_fragment_info_alloc( ctx, array_name.c_str(), &deserialized_fragment_info); CHECK(rc == TILEDB_OK); + rc = tiledb_fragment_info_set_config(ctx, deserialized_fragment_info, cfg); + CHECK(rc == TILEDB_OK); tiledb_fragment_info_serialize( ctx, array_name.c_str(), @@ -737,6 +738,8 @@ TEST_CASE("C API: Test MBR fragment info", "[capi][fragment_info][mbr]") { fragment_info = deserialized_fragment_info; } + tiledb_config_free(&cfg); + // Get fragment num uint32_t fragment_num; rc = tiledb_fragment_info_get_fragment_num(ctx, fragment_info, &fragment_num); diff --git a/tiledb/sm/fragment/fragment_info.cc b/tiledb/sm/fragment/fragment_info.cc index 16a89b22e2b..ade2666c469 100644 --- a/tiledb/sm/fragment/fragment_info.cc +++ b/tiledb/sm/fragment/fragment_info.cc @@ -77,6 +77,7 @@ void FragmentInfo::set_config(const Config& config) { throw FragmentInfoException("[set_config] Cannot set config after load"); } config_.inherit(config); + throw_if_not_ok(set_enc_key_from_config()); } void FragmentInfo::expand_anterior_ndrange( @@ -1003,6 +1004,8 @@ Status FragmentInfo::load(const ArrayDirectory& array_dir) { if (preload_rtrees & !meta->dense()) { meta->loaded_metadata()->load_rtree(enc_key_); + meta->loaded_metadata()->load_fragment_tile_global_order_bounds( + enc_key_); } return Status::Ok(); From 6ad6ac948126ddccd071dddaa4323db06244b677 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Wed, 6 Aug 2025 15:05:27 -0400 Subject: [PATCH 07/53] Fix set_num_tiles index for bounds buffers --- tiledb/sm/fragment/fragment_metadata.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tiledb/sm/fragment/fragment_metadata.cc b/tiledb/sm/fragment/fragment_metadata.cc index 8eeba503e38..2a2b8f048b5 100644 --- a/tiledb/sm/fragment/fragment_metadata.cc +++ b/tiledb/sm/fragment/fragment_metadata.cc @@ -1372,7 +1372,8 @@ void FragmentMetadata::set_num_tiles(uint64_t num_tiles) { // Sparse arrays also store the global order lower/upper bounds if (!array_schema_->dense() && is_dim) { - const unsigned dimension = i - array_schema_->dim_num(); + const unsigned dimension = + array_schema_->domain().get_dimension_index(it.first); loaded_metadata_ptr_->tile_global_order_min_buffer()[dimension].resize( num_tiles * cell_size, 0); loaded_metadata_ptr_->tile_global_order_max_buffer()[dimension].resize( From fa5b8ee231fde81d5a1566e1a5edcbf2d03f88c9 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Wed, 6 Aug 2025 15:05:42 -0400 Subject: [PATCH 08/53] Add tiledb::Array::context --- tiledb/sm/cpp_api/array.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tiledb/sm/cpp_api/array.h b/tiledb/sm/cpp_api/array.h index 5c4bca8c272..c1d9874af18 100644 --- 
a/tiledb/sm/cpp_api/array.h +++ b/tiledb/sm/cpp_api/array.h @@ -319,6 +319,10 @@ class Array { return std::string(uri); } + const Context& context() const { + return ctx_.get(); + } + /** Get the ArraySchema for the array. **/ ArraySchema schema() const { auto& ctx = ctx_.get(); From 01ec15b8085bc46282f0ec274c5d1bdcdade2d8a Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Wed, 6 Aug 2025 15:07:01 -0400 Subject: [PATCH 09/53] Refactor some functions into array_templates.h --- test/src/unit-sparse-global-order-reader.cc | 91 ++-------------- test/support/src/array_templates.h | 114 ++++++++++++++++++++ 2 files changed, 122 insertions(+), 83 deletions(-) diff --git a/test/src/unit-sparse-global-order-reader.cc b/test/src/unit-sparse-global-order-reader.cc index 0046caae331..e3a921c1dcb 100644 --- a/test/src/unit-sparse-global-order-reader.cc +++ b/test/src/unit-sparse-global-order-reader.cc @@ -649,36 +649,10 @@ void CSparseGlobalOrderFx::write_fragment( } CApiArray& array = *existing; + Array cpparray(vfs_test_setup_.ctx(), array, false); - // Create the query. - tiledb_query_t* query; - auto rc = tiledb_query_alloc(context(), array, TILEDB_WRITE, &query); - ASSERTER(rc == TILEDB_OK); - rc = tiledb_query_set_layout(context(), query, TILEDB_UNORDERED); - ASSERTER(rc == TILEDB_OK); - - auto field_sizes = templates::query::make_field_sizes(fragment); - templates::query::set_fields( - context(), - query, - field_sizes, - fragment, - [](unsigned d) { return "d" + std::to_string(d + 1); }, - [](unsigned a) { return "a" + std::to_string(a + 1); }); - - // Submit query. - rc = tiledb_query_submit(context(), query); - ASSERTER(std::optional() == error_if_any(rc)); - - // check that sizes match what we expect - const uint64_t expect_num_cells = fragment.size(); - const uint64_t num_cells = - templates::query::num_cells(fragment, field_sizes); - - ASSERTER(num_cells == expect_num_cells); - - // Clean up. - tiledb_query_free(&query); + templates::query::write_fragment( + fragment, cpparray, TILEDB_UNORDERED); } void CSparseGlobalOrderFx::write_1d_fragment_strings( @@ -3333,64 +3307,15 @@ TEST_CASE_METHOD( */ template void CSparseGlobalOrderFx::create_array(const Instance& instance) { - const auto dimensions = instance.dimensions(); - const auto attributes = instance.attributes(); - - std::vector dimension_names; - std::vector dimension_types; - std::vector dimension_ranges; - std::vector dimension_extents; - auto add_dimension = [&]( - const templates::Dimension& dimension) { - using CoordType = templates::Dimension::value_type; - dimension_names.push_back("d" + std::to_string(dimension_names.size() + 1)); - dimension_types.push_back(static_cast(D)); - dimension_ranges.push_back( - const_cast(&dimension.domain.lower_bound)); - dimension_extents.push_back(const_cast(&dimension.extent)); - }; - std::apply( - [&](const templates::Dimension&... 
dimension) { - (add_dimension(dimension), ...); - }, - dimensions); - - std::vector attribute_names; - std::vector attribute_types; - std::vector attribute_cell_val_nums; - std::vector attribute_nullables; - std::vector> attribute_compressors; - auto add_attribute = [&](Datatype datatype, - uint32_t cell_val_num, - bool nullable) { - attribute_names.push_back("a" + std::to_string(attribute_names.size() + 1)); - attribute_types.push_back(static_cast(datatype)); - attribute_cell_val_nums.push_back(cell_val_num); - attribute_nullables.push_back(nullable); - attribute_compressors.push_back(std::make_pair(TILEDB_FILTER_NONE, -1)); - }; - for (const auto& [datatype, cell_val_num, nullable] : attributes) { - add_attribute(datatype, cell_val_num, nullable); - } - - tiledb::test::create_array( - context(), + templates::ddl::create_array( array_name_, - TILEDB_SPARSE, - dimension_names, - dimension_types, - dimension_ranges, - dimension_extents, - attribute_names, - attribute_types, - attribute_cell_val_nums, - attribute_compressors, + Context(context(), false), + instance.dimensions(), + instance.attributes(), instance.tile_order(), instance.cell_order(), instance.tile_capacity(), - instance.allow_duplicates(), - false, - {attribute_nullables}); + instance.allow_duplicates()); } /** diff --git a/test/support/src/array_templates.h b/test/support/src/array_templates.h index 7858077b2b7..6e7b7a4bd78 100644 --- a/test/support/src/array_templates.h +++ b/test/support/src/array_templates.h @@ -36,12 +36,14 @@ #include "tiledb.h" #include "tiledb/common/unreachable.h" +#include "tiledb/sm/cpp_api/tiledb" #include "tiledb/sm/query/ast/query_ast.h" #include "tiledb/type/datatype_traits.h" #include "tiledb/type/range/range.h" #include #include +#include #include #include #include @@ -1379,8 +1381,120 @@ uint64_t num_cells(const F& fragment, const auto& field_sizes) { }(std::tuple_cat(fragment.dimensions(), fragment.attributes())); } +/** + * Writes a fragment to an array. + */ +template +void write_fragment( + const Fragment& fragment, + Array& forwrite, + tiledb_layout_t layout = TILEDB_UNORDERED) { + Query query(forwrite.context(), forwrite, TILEDB_WRITE); + query.set_layout(layout); + + auto field_sizes = make_field_sizes(fragment); + templates::query::set_fields( + query.ctx().ptr().get(), + query.ptr().get(), + field_sizes, + fragment, + [](unsigned d) { return "d" + std::to_string(d + 1); }, + [](unsigned a) { return "a" + std::to_string(a + 1); }); + + const auto status = query.submit(); + ASSERTER(status == Query::Status::COMPLETE); + + if (layout == TILEDB_GLOBAL_ORDER) { + query.finalize(); + } + + // check that sizes match what we expect + const uint64_t expect_num_cells = fragment.size(); + const uint64_t num_cells = + templates::query::num_cells(fragment, field_sizes); + + ASSERTER(num_cells == expect_num_cells); +} + } // namespace query +namespace ddl { + +/** + * Creates an array with a schema whose dimensions and attributes + * come from the simplified arguments. + * The names of the dimensions are d1, d2, etc. + * The names of the attributes are a1, a2, etc. 
+ */ +template +void create_array( + const std::string& array_name, + const Context& context, + const std::tuple&...> dimensions, + std::vector> attributes, + tiledb_layout_t tile_order, + tiledb_layout_t cell_order, + uint64_t tile_capacity, + bool allow_duplicates) { + std::vector dimension_names; + std::vector dimension_types; + std::vector dimension_ranges; + std::vector dimension_extents; + auto add_dimension = [&]( + const templates::Dimension& dimension) { + using CoordType = templates::Dimension::value_type; + dimension_names.push_back("d" + std::to_string(dimension_names.size() + 1)); + dimension_types.push_back(static_cast(D)); + dimension_ranges.push_back( + const_cast(&dimension.domain.lower_bound)); + dimension_extents.push_back(const_cast(&dimension.extent)); + }; + std::apply( + [&](const templates::Dimension&... dimension) { + (add_dimension(dimension), ...); + }, + dimensions); + + std::vector attribute_names; + std::vector attribute_types; + std::vector attribute_cell_val_nums; + std::vector attribute_nullables; + std::vector> attribute_compressors; + auto add_attribute = [&](Datatype datatype, + uint32_t cell_val_num, + bool nullable) { + attribute_names.push_back("a" + std::to_string(attribute_names.size() + 1)); + attribute_types.push_back(static_cast(datatype)); + attribute_cell_val_nums.push_back(cell_val_num); + attribute_nullables.push_back(nullable); + attribute_compressors.push_back(std::make_pair(TILEDB_FILTER_NONE, -1)); + }; + for (const auto& [datatype, cell_val_num, nullable] : attributes) { + add_attribute(datatype, cell_val_num, nullable); + } + + tiledb::test::create_array( + context.ptr().get(), + array_name, + TILEDB_SPARSE, + dimension_names, + dimension_types, + dimension_ranges, + dimension_extents, + attribute_names, + attribute_types, + attribute_cell_val_nums, + attribute_compressors, + tile_order, + cell_order, + tile_capacity, + allow_duplicates, + false, + {attribute_nullables}); +} + +} // namespace ddl + } // namespace tiledb::test::templates #endif From bfbec1f74f500f41ec338b74659ff2c2b0881318 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Thu, 7 Aug 2025 14:09:46 -0400 Subject: [PATCH 10/53] Refactor Fragment1D, Fragment2D into common base class --- test/src/unit-sparse-global-order-reader.cc | 105 +++++----- test/support/rapidcheck/array_templates.h | 4 +- test/support/src/array_templates.h | 212 ++++++++++++++------ 3 files changed, 201 insertions(+), 120 deletions(-) diff --git a/test/src/unit-sparse-global-order-reader.cc b/test/src/unit-sparse-global-order-reader.cc index e3a921c1dcb..faa9cd5d49a 100644 --- a/test/src/unit-sparse-global-order-reader.cc +++ b/test/src/unit-sparse-global-order-reader.cc @@ -189,7 +189,7 @@ struct FxRun1D { if (subarray.empty()) { return true; } else { - const CoordType coord = fragment.dim_[record]; + const CoordType coord = fragment.dimension()[record]; for (const auto& range : subarray) { if (range.contains(coord)) { return true; @@ -349,7 +349,7 @@ struct FxRun2D { if (subarray.empty() && !condition.has_value()) { return true; } else { - const int r = fragment.d1_[record], c = fragment.d2_[record]; + const int r = fragment.d1()[record], c = fragment.d2()[record]; for (const auto& range : subarray) { if (range.first.has_value() && !range.first->contains(r)) { continue; @@ -1335,23 +1335,23 @@ TEST_CASE_METHOD( // Write a fragment F0 with unique coordinates InstanceType::FragmentType fragment0; - fragment0.dim_.resize(fragment_size); - std::iota(fragment0.dim_.begin(), fragment0.dim_.end(), 1); + 
fragment0.dimension().resize(fragment_size); + std::iota(fragment0.dimension().begin(), fragment0.dimension().end(), 1); // Write a fragment F1 with lots of duplicates // [100,100,100,100,100,101,101,101,101,101,102,102,102,102,102,...] InstanceType::FragmentType fragment1; - fragment1.dim_.resize(fragment0.dim_.num_cells()); - for (size_t i = 0; i < fragment1.dim_.num_cells(); i++) { - fragment1.dim_[i] = - static_cast((i / 10) + (fragment0.dim_.num_cells() / 2)); + fragment1.dimension().resize(fragment0.dimension().num_cells()); + for (size_t i = 0; i < fragment1.dimension().num_cells(); i++) { + fragment1.dimension()[i] = + static_cast((i / 10) + (fragment0.dimension().num_cells() / 2)); } // atts are whatever, used just for query condition and correctness check auto& f0atts = std::get<0>(fragment0.atts_); - f0atts.resize(fragment0.dim_.num_cells()); + f0atts.resize(fragment0.dimension().num_cells()); std::iota(f0atts.begin(), f0atts.end(), 0); - for (uint64_t i = 0; i < fragment0.dim_.num_cells(); i++) { + for (uint64_t i = 0; i < fragment0.dimension().num_cells(); i++) { if ((i * i) % 7 == 0) { std::get<1>(fragment0.atts_).push_back(std::nullopt); } else { @@ -1364,9 +1364,10 @@ TEST_CASE_METHOD( } auto& f1atts = std::get<0>(fragment1.atts_); - f1atts.resize(fragment1.dim_.num_cells()); - std::iota(f1atts.begin(), f1atts.end(), int(fragment0.dim_.num_cells())); - for (uint64_t i = 0; i < fragment1.dim_.num_cells(); i++) { + f1atts.resize(fragment1.dimension().num_cells()); + std::iota( + f1atts.begin(), f1atts.end(), int(fragment0.dimension().num_cells())); + for (uint64_t i = 0; i < fragment1.dimension().num_cells(); i++) { if ((i * i) % 11 == 0) { std::get<1>(fragment1.atts_).push_back(std::nullopt); } else { @@ -1466,25 +1467,25 @@ TEST_CASE_METHOD( templates::Fragment1D fragment1; // Write a fragment F0 with tiles [1,3][3,5][5,7][7,9]... - fragment0.dim_.resize(fragment_size); - fragment0.dim_[0] = 1; - for (size_t i = 1; i < fragment0.dim_.num_cells(); i++) { - fragment0.dim_[i] = static_cast(1 + 2 * ((i + 1) / 2)); + fragment0.dimension().resize(fragment_size); + fragment0.dimension()[0] = 1; + for (size_t i = 1; i < fragment0.dimension().num_cells(); i++) { + fragment0.dimension()[i] = static_cast(1 + 2 * ((i + 1) / 2)); } // Write a fragment F1 with tiles [2,4][4,6][6,8][8,10]... 
- fragment1.dim_.resize(fragment0.dim_.num_cells()); - for (size_t i = 0; i < fragment1.dim_.num_cells(); i++) { - fragment1.dim_[i] = fragment0.dim_[i] + 1; + fragment1.dimension().resize(fragment0.dimension().num_cells()); + for (size_t i = 0; i < fragment1.dimension().num_cells(); i++) { + fragment1.dimension()[i] = fragment0.dimension()[i] + 1; } // atts don't really matter auto& f0atts = std::get<0>(fragment0.atts_); - f0atts.resize(fragment0.dim_.num_cells()); + f0atts.resize(fragment0.dimension().num_cells()); std::iota(f0atts.begin(), f0atts.end(), 0); auto& f1atts = std::get<0>(fragment1.atts_); - f1atts.resize(fragment1.dim_.num_cells()); + f1atts.resize(fragment1.dimension().num_cells()); std::iota(f1atts.begin(), f1atts.end(), int(f0atts.num_cells())); FxRun1D instance; @@ -1588,10 +1589,10 @@ TEST_CASE_METHOD( for (size_t f = 0; f < num_fragments; f++) { templates::Fragment1D fragment; - fragment.dim_.resize(fragment_size); + fragment.dimension().resize(fragment_size); std::iota( - fragment.dim_.begin(), - fragment.dim_.end(), + fragment.dimension().begin(), + fragment.dimension().end(), instance.array.dimension_.domain.lower_bound + static_cast(f)); auto& atts = std::get<0>(fragment.atts_); @@ -1715,10 +1716,10 @@ TEST_CASE_METHOD( for (size_t f = 0; f < num_fragments; f++) { templates::Fragment1D fragment; - fragment.dim_.resize(fragment_size); + fragment.dimension().resize(fragment_size); std::iota( - fragment.dim_.begin(), - fragment.dim_.end(), + fragment.dimension().begin(), + fragment.dimension().end(), static_cast(f * (fragment_size - 1))); auto& atts = std::get<0>(fragment.atts_); @@ -1896,13 +1897,13 @@ TEST_CASE_METHOD( for (size_t f = 0; f < num_fragments; f++) { templates::Fragment2D fdata; - fdata.d1_.reserve(fragment_size); - fdata.d2_.reserve(fragment_size); + fdata.d1().reserve(fragment_size); + fdata.d2().reserve(fragment_size); std::get<0>(fdata.atts_).reserve(fragment_size); for (size_t i = 0; i < fragment_size; i++) { - fdata.d1_.push_back(row(f, i)); - fdata.d2_.push_back(col(f, i)); + fdata.d1().push_back(row(f, i)); + fdata.d2().push_back(col(f, i)); std::get<0>(fdata.atts_) .push_back(static_cast(f * fragment_size + i)); } @@ -2100,34 +2101,34 @@ TEST_CASE_METHOD( const int tcol = instance.d2.domain.lower_bound + static_cast(f * instance.d2.extent); for (int i = 0; i < instance.d1.extent * instance.d2.extent - 2; i++) { - fdata.d1_.push_back(trow + i / instance.d1.extent); - fdata.d2_.push_back(tcol + i % instance.d1.extent); + fdata.d1().push_back(trow + i / instance.d1.extent); + fdata.d2().push_back(tcol + i % instance.d1.extent); std::get<0>(fdata.atts_).push_back(att++); } // then some sparse coords in the next space tile, // fill the data tile (if the capacity is 4), we'll call it T - fdata.d1_.push_back(trow); - fdata.d2_.push_back(tcol + instance.d2.extent); + fdata.d1().push_back(trow); + fdata.d2().push_back(tcol + instance.d2.extent); std::get<0>(fdata.atts_).push_back(att++); - fdata.d1_.push_back(trow + instance.d1.extent - 1); - fdata.d2_.push_back(tcol + instance.d2.extent + 2); + fdata.d1().push_back(trow + instance.d1.extent - 1); + fdata.d2().push_back(tcol + instance.d2.extent + 2); std::get<0>(fdata.atts_).push_back(att++); // then begin a new data tile "Tnext" which straddles the bounds of that // space tile. this will have a low MBR. 
- fdata.d1_.push_back(trow + instance.d1.extent - 1); - fdata.d2_.push_back(tcol + instance.d2.extent + 3); + fdata.d1().push_back(trow + instance.d1.extent - 1); + fdata.d2().push_back(tcol + instance.d2.extent + 3); std::get<0>(fdata.atts_).push_back(att++); - fdata.d1_.push_back(trow); - fdata.d2_.push_back(tcol + 2 * instance.d2.extent); + fdata.d1().push_back(trow); + fdata.d2().push_back(tcol + 2 * instance.d2.extent); std::get<0>(fdata.atts_).push_back(att++); // then add a point P which is less than the lower bound of Tnext's MBR, // and also between the last two coordinates of T FxRun2D::FragmentType fpoint; - fpoint.d1_.push_back(trow + instance.d1.extent - 1); - fpoint.d2_.push_back(tcol + instance.d1.extent + 1); + fpoint.d1().push_back(trow + instance.d1.extent - 1); + fpoint.d2().push_back(tcol + instance.d1.extent + 1); std::get<0>(fpoint.atts_).push_back(att++); instance.fragments.push_back(fdata); @@ -2242,13 +2243,13 @@ TEST_CASE_METHOD( for (size_t f = 0; f < num_fragments; f++) { FxRunType::FragmentType fragment; - fragment.dim_.resize(fragment_size); + fragment.dimension().resize(fragment_size); std::iota( - fragment.dim_.begin(), - fragment.dim_.end(), + fragment.dimension().begin(), + fragment.dimension().end(), dimension.domain.lower_bound); - std::get<0>(fragment.atts_).resize(fragment.dim_.num_cells()); + std::get<0>(fragment.atts_).resize(fragment.dimension().num_cells()); std::iota( std::get<0>(fragment.atts_).begin(), std::get<0>(fragment.atts_).end(), @@ -3192,8 +3193,8 @@ TEST_CASE_METHOD( for (uint64_t t = 0; t < fragment_same_timestamp_runs.size(); t++) { for (uint64_t f = 0; f < fragment_same_timestamp_runs[t]; f++) { FxRun2D::FragmentType fragment; - fragment.d1_ = {1, 2 + static_cast(t)}; - fragment.d2_ = {1, 2 + static_cast(f)}; + fragment.d1() = {1, 2 + static_cast(t)}; + fragment.d2() = {1, 2 + static_cast(f)}; std::get<0>(fragment.atts_) = std::vector{ static_cast(instance.fragments.size()), static_cast(instance.fragments.size())}; @@ -3939,11 +3940,11 @@ void show(const FxRun2D& instance, std::ostream& os) { os << "\t\t{" << std::endl; os << "\t\t\t\"d1\": [" << std::endl; os << "\t\t\t\t"; - show(fragment.d1_, os); + show(fragment.d1(), os); os << std::endl; os << "\t\t\t\"d2\": [" << std::endl; os << "\t\t\t\t"; - show(fragment.d2_, os); + show(fragment.d2(), os); os << std::endl; os << "\t\t\t], " << std::endl; os << "\t\t\t\"atts\": [" << std::endl; diff --git a/test/support/rapidcheck/array_templates.h b/test/support/rapidcheck/array_templates.h index 41f5ac82495..bfcd84f3fef 100644 --- a/test/support/rapidcheck/array_templates.h +++ b/test/support/rapidcheck/array_templates.h @@ -195,7 +195,7 @@ Gen> make_fragment_1d( stdx::transpose(cells)); return Fragment1D{ - .dim_ = coords, .atts_ = atts}; + std::make_tuple(coords), atts}; }); } @@ -233,7 +233,7 @@ Gen> make_fragment_2d( stdx::transpose(cells)); return Fragment2D{ - .d1_ = coords_d1, .d2_ = coords_d2, .atts_ = atts}; + std::make_tuple(coords_d1, coords_d2), atts}; }); } diff --git a/test/support/src/array_templates.h b/test/support/src/array_templates.h index 6e7b7a4bd78..e05ed41a067 100644 --- a/test/support/src/array_templates.h +++ b/test/support/src/array_templates.h @@ -438,6 +438,10 @@ struct query_buffers { : values_(cells) { } + query_buffers(std::initializer_list cells) + : values_(cells) { + } + bool operator==(const self_type&) const = default; uint64_t num_cells() const { @@ -483,6 +487,11 @@ struct query_buffers { return *this; } + self_type& operator=(const 
std::initializer_list& values) { + values_ = values; + return *this; + } + query_field_size_type make_field_size(uint64_t cell_limit) const { return sizeof(T) * std::min(cell_limit, values_.size()); } @@ -1127,82 +1136,146 @@ struct query_buffers>> { } }; -/** - * Data for a one-dimensional array - */ -template -struct Fragment1D { - using DimensionType = D; +template +struct Fragment { + private: + template + struct to_query_buffers { + using value_type = std::tuple...>; + using ref_type = std::tuple&...>; + using const_ref_type = std::tuple&...>; + }; + + template + static to_query_buffers::value_type f_qb_value(std::tuple) { + return std::declval::value_type>(); + } + + template + static to_query_buffers::ref_type f_qb_ref(std::tuple) { + return std::declval::ref_type>(); + } - query_buffers dim_; - std::tuple...> atts_; + template + static to_query_buffers::const_ref_type f_qb_const_ref( + std::tuple) { + return std::declval::const_ref_type>(); + } + + template + using value_tuple_query_buffers = decltype(f_qb_value(std::declval())); + + template + using ref_tuple_query_buffers = decltype(f_qb_ref(std::declval())); + + template + using const_ref_tuple_query_buffers = + decltype(f_qb_const_ref(std::declval())); + + public: + using DimensionBuffers = value_tuple_query_buffers; + using DimensionBuffersRef = ref_tuple_query_buffers; + using DimensionBuffersConstRef = + const_ref_tuple_query_buffers; + + using AttributeBuffers = value_tuple_query_buffers; + using AttributeBuffersRef = ref_tuple_query_buffers; + using AttributeBuffersConstRef = + const_ref_tuple_query_buffers; + + DimensionBuffers dims_; + AttributeBuffers atts_; + + uint64_t num_cells() const { + static_assert( + std::tuple_size::value > 0 || + std::tuple_size::value > 0); + + if constexpr (std::tuple_size::value == 0) { + return std::get<0>(atts_).num_cells(); + } else { + return std::get<0>(atts_).num_cells(); + } + } uint64_t size() const { - return dim_.num_cells(); + return num_cells(); } - std::tuple&> dimensions() const { - return std::tuple&>(dim_); + const DimensionBuffersConstRef dimensions() const { + return std::apply( + [](const auto&... field) { return std::forward_as_tuple(field...); }, + dims_); } - std::tuple&...> attributes() const { + DimensionBuffersRef dimensions() { return std::apply( - [](const query_buffers&... attribute) { - return std::tuple&...>(attribute...); - }, - atts_); + [](auto&... field) { return std::forward_as_tuple(field...); }, dims_); } - std::tuple&> dimensions() { - return std::tuple&>(dim_); + const AttributeBuffersConstRef attributes() const { + return std::apply( + [](const auto&... field) { return std::forward_as_tuple(field...); }, + atts_); } - std::tuple&...> attributes() { + AttributeBuffersRef attributes() { return std::apply( - [](query_buffers&... attribute) { - return std::tuple&...>(attribute...); + [](auto&... field) { return std::forward_as_tuple(field...); }, atts_); + } + + void reserve(uint64_t num_cells) { + std::apply( + [num_cells](Ts&... field) { + (field.reserve(num_cells), ...); }, - atts_); + std::tuple_cat(dimensions(), attributes())); + } + + void resize(uint64_t num_cells) { + std::apply( + [num_cells](Ts&... 
field) { + (field.resize(num_cells), ...); + }, + std::tuple_cat(dimensions(), attributes())); } }; /** - * Data for a two-dimensional array + * Data for a one-dimensional array */ -template -struct Fragment2D { - query_buffers d1_; - query_buffers d2_; - std::tuple...> atts_; +template +struct Fragment1D : public Fragment, std::tuple> { + using DimensionType = D; - uint64_t size() const { - return d1_.num_cells(); + const query_buffers& dimension() const { + return std::get<0>(this->dimensions()); + } + + query_buffers& dimension() { + return std::get<0>(this->dimensions()); } +}; - std::tuple&, const query_buffers&> dimensions() - const { - return std::tuple&, const query_buffers&>( - d1_, d2_); +/** + * Data for a two-dimensional array + */ +template +struct Fragment2D : public Fragment, std::tuple> { + const query_buffers& d1() const { + return std::get<0>(this->dimensions()); } - std::tuple&, query_buffers&> dimensions() { - return std::tuple&, query_buffers&>(d1_, d2_); + const query_buffers& d2() const { + return std::get<1>(this->dimensions()); } - std::tuple&...> attributes() const { - return std::apply( - [](const query_buffers&... attribute) { - return std::tuple&...>(attribute...); - }, - atts_); + query_buffers& d1() { + return std::get<0>(this->dimensions()); } - std::tuple&...> attributes() { - return std::apply( - [](query_buffers&... attribute) { - return std::tuple&...>(attribute...); - }, - atts_); + query_buffers& d2() { + return std::get<0>(this->dimensions()); } }; @@ -1351,24 +1424,31 @@ void set_fields( std::decay_t, std::tuple_size_v>::value(field_cursors); - [&](std::tuple fields) { - query_applicator::set( - ctx, - query, - split_sizes.first, - fields, - dimension_name, - split_cursors.first); - }(fragment.dimensions()); - [&](std::tuple fields) { - query_applicator::set( - ctx, - query, - split_sizes.second, - fields, - attribute_name, - split_cursors.second); - }(fragment.attributes()); + if constexpr (!std:: + is_same_v>) { + [&](std::tuple fields) { + query_applicator::set( + ctx, + query, + split_sizes.first, + fields, + dimension_name, + split_cursors.first); + }(fragment.dimensions()); + } + + if constexpr (!std:: + is_same_v>) { + [&](std::tuple fields) { + query_applicator::set( + ctx, + query, + split_sizes.second, + fields, + attribute_name, + split_cursors.second); + }(fragment.attributes()); + } } /** From a018738bcdf55d8b5814e8e4b4cd34882a70ebf0 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Thu, 7 Aug 2025 14:45:11 -0400 Subject: [PATCH 11/53] Fragment metadata global order bounds example passes --- test/CMakeLists.txt | 1 + .../unit-fragment-info-global-order-bounds.cc | 275 ++++++++++++++++++ test/support/src/array_templates.h | 20 +- 3 files changed, 289 insertions(+), 7 deletions(-) create mode 100644 test/src/unit-fragment-info-global-order-bounds.cc diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 92595cdf557..872908e738e 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -110,6 +110,7 @@ set(TILEDB_UNIT_TEST_SOURCES src/unit-enumerations.cc src/unit-enum-helpers.cc src/unit-filter-buffer.cc + src/unit-fragment-info-global-order-bounds.cc src/unit-global-order.cc src/unit-ordered-dim-label-reader.cc src/unit-tile-metadata.cc diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc new file mode 100644 index 00000000000..e7d9b70afb2 --- /dev/null +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -0,0 +1,275 @@ +/** + * @file 
unit-fragment-info-global-order-bounds.cc
+ *
+ * @section LICENSE
+ *
+ * The MIT License
+ *
+ * @copyright Copyright (c) 2025 TileDB, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * @section DESCRIPTION
+ *
+ * Tests populating fragment metadata with the global order lower/upper bound
+ * coordinates per tile.
+ */
+
+#include "tiledb/api/c_api/array_schema/array_schema_api_internal.h"
+#include "tiledb/sm/misc/comparators.h"
+
+#include 
+#include 
+
+using namespace tiledb;
+using namespace tiledb::test;
+
+/**
+ * @return another fragment containing the contents of the argument sorted in
+ * global order based on `array.schema()`.
+ */
+template 
+static F make_global_order(
+    const Array& array, const F& fragment, tiledb_layout_t layout) {
+  if (layout == TILEDB_GLOBAL_ORDER) {
+    return fragment;
+  }
+
+  std::vector idxs(fragment.size(), 0);
+  std::iota(idxs.begin(), idxs.end(), 0);
+
+  // sort in global order
+  sm::GlobalCellCmp globalcmp(array.schema().ptr()->array_schema()->domain());
+
+  auto icmp = [&](uint64_t ia, uint64_t ib) -> bool {
+    return std::apply(
+        [&globalcmp, ia, ib](
+            const templates::query_buffers&... dims) {
+          const auto l = std::make_tuple(dims[ia]...);
+          const auto r = std::make_tuple(dims[ib]...);
+          return globalcmp(
+              templates::global_cell_cmp_std_tuple(l),
+              templates::global_cell_cmp_std_tuple(r));
+        },
+        fragment.dimensions());
+  };
+  std::sort(idxs.begin(), idxs.end(), icmp);
+
+  F sorted = fragment;
+  sorted.dimensions() =
+      stdx::select(stdx::reference_tuple(sorted.dimensions()), std::span(idxs));
+  if constexpr (!std::is_same_v>) {
+    sorted.attributes() = stdx::select(
+        stdx::reference_tuple(sorted.attributes()), std::span(idxs));
+  }
+
+  return sorted;
+}
+
+template 
+using DimensionTuple =
+    stdx::decay_tuple().dimensions())>;
+
+template 
+auto tuple_index(const T& tuple, uint64_t idx) {
+  return std::apply(
+      [&](const auto&... field) { return std::make_tuple(field[idx]...); },
+      tuple);
+}
+
+template 
+using CoordsTuple = decltype(tuple_index(
+    std::declval().dimensions(), std::declval()));
+
+template 
+static void prepare_bound_buffers(
+    DimensionTuple& bufs, std::array& ptrs) {
+  uint64_t i = 0;
+  std::apply(
+      [&](templates::query_buffers&... 
qbufs) { + ([&]() { ptrs[i++] = static_cast(&qbufs[0]); }(), ...); + }, + bufs); +} + +/** + * @return the lower and upper bounds of tile `(f, t)` in the fragment info + */ +template +static std::pair, CoordsTuple> global_order_bounds( + const FragmentInfo& finfo, uint64_t fragment, uint64_t tile) { + constexpr size_t num_fields = std::tuple_size>::value; + + // FIXME: there needs to be another API to ask about maximum variable-length. + // Otherwise it is unsafe to call this API with variable-length dimensions + + DimensionTuple lb, ub; + std::apply( + [](templates::query_buffers&... field) { + (field.resize(1), ...); + }, + lb); + std::apply( + [](templates::query_buffers&... field) { + (field.resize(1), ...); + }, + ub); + + size_t lb_sizes[num_fields]; + std::array lb_dimensions; + prepare_bound_buffers(lb, lb_dimensions); + + size_t ub_sizes[num_fields]; + std::array ub_dimensions; + prepare_bound_buffers(ub, ub_dimensions); + + auto ctx_c = finfo.context().ptr().get(); + + // FIXME: add C++ API + auto rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx_c, + finfo.ptr().get(), + fragment, + tile, + &lb_sizes[0], + &lb_dimensions[0]); + throw_if_error(ctx_c, rc); + + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx_c, + finfo.ptr().get(), + fragment, + tile, + &ub_sizes[0], + &ub_dimensions[0]); + throw_if_error(ctx_c, rc); + + return std::make_pair(tuple_index(lb, 0), tuple_index(ub, 0)); +} + +/** + * Asserts that when a set of fragments are written, the fragment metadata + * accurately reflects the expected global order bounds of the input. + * + * @return the global order bounds for each tile per fragment + */ +template +std::vector, CoordsTuple>>> instance( + Context ctx, + const std::string& array_uri, + const std::vector& fragments, + tiledb_layout_t layout = TILEDB_GLOBAL_ORDER) { + // write each fragment + { + Array forwrite(ctx, array_uri, TILEDB_WRITE); + for (const auto& fragment : fragments) { + templates::query::write_fragment(fragment, forwrite); + } + } + + // check bounds + std::vector, CoordsTuple>>> bounds; + Array forread(ctx, array_uri, TILEDB_READ); + + const uint64_t tile_stride = forread.schema().capacity(); + + FragmentInfo finfo(ctx, array_uri); + finfo.load(); + + for (size_t f = 0; f < fragments.size(); f++) { + const auto fragment = make_global_order(forread, fragments[f], layout); + + std::decay_t fragment_bounds; + + const uint64_t num_tiles = finfo.mbr_num(f); + for (size_t t = 0; t < num_tiles; t++) { + const uint64_t lbi = t * tile_stride; + const uint64_t ubi = std::min((t + 1) * tile_stride, fragment.size()) - 1; + + const auto lbexpect = tuple_index(fragment.dimensions(), lbi); + const auto ubexpect = tuple_index(fragment.dimensions(), ubi); + + const auto [lbactual, ubactual] = global_order_bounds(finfo, f, t); + ASSERTER(lbexpect == lbactual); + ASSERTER(ubexpect == ubactual); + + fragment_bounds.push_back(std::make_pair(lbactual, ubactual)); + } + + bounds.push_back(fragment_bounds); + } + + return bounds; +} + +TEST_CASE( + "Fragment metadata global order bounds: 1D fixed", + "[fragment_info][global-order]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_1d_fixed"); + + const templates::Dimension dimension( + templates::Domain(1, 1024), 16); + templates::ddl::create_array( + array_uri, + vfs_test_setup.ctx(), + std::tuple&>{dimension}, + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + false); + + using Fragment = 
templates::Fragment1D; + + auto make_expect = + [](std::initializer_list> tile_bounds) { + std::vector, std::tuple>> + out_bounds; + for (const auto& tile : tile_bounds) { + out_bounds.push_back(std::make_pair( + std::make_tuple(tile.first), std::make_tuple(tile.second))); + } + return out_bounds; + }; + + SECTION("Example 1") { + Fragment f; + f.resize(64); + std::iota(f.dimension().begin(), f.dimension().end(), 1); + + const auto fragment_bounds = + instance( + vfs_test_setup.ctx(), array_uri, std::vector{f}); + REQUIRE(fragment_bounds.size() == 1); + + const auto expect = make_expect({ + {1, 8}, + {9, 16}, + {17, 24}, + {25, 32}, + {33, 40}, + {41, 48}, + {49, 56}, + {57, 64}, + }); + CHECK(fragment_bounds[0] == expect); + } +} diff --git a/test/support/src/array_templates.h b/test/support/src/array_templates.h index e05ed41a067..f02ab263418 100644 --- a/test/support/src/array_templates.h +++ b/test/support/src/array_templates.h @@ -1183,18 +1183,21 @@ struct Fragment { using AttributeBuffersConstRef = const_ref_tuple_query_buffers; + static constexpr size_t NUM_DIMENSIONS = + std::tuple_size::value; + static constexpr size_t NUM_ATTRIBUTES = + std::tuple_size::value; + DimensionBuffers dims_; AttributeBuffers atts_; uint64_t num_cells() const { - static_assert( - std::tuple_size::value > 0 || - std::tuple_size::value > 0); + static_assert(NUM_DIMENSIONS > 0 || NUM_ATTRIBUTES > 0); - if constexpr (std::tuple_size::value == 0) { + if constexpr (NUM_DIMENSIONS == 0) { return std::get<0>(atts_).num_cells(); } else { - return std::get<0>(atts_).num_cells(); + return std::get<0>(dims_).num_cells(); } } @@ -1472,12 +1475,15 @@ void write_fragment( Query query(forwrite.context(), forwrite, TILEDB_WRITE); query.set_layout(layout); - auto field_sizes = make_field_sizes(fragment); + // for writes the arg is read-only + Fragment& nonconst = const_cast(fragment); + + auto field_sizes = make_field_sizes(nonconst); templates::query::set_fields( query.ctx().ptr().get(), query.ptr().get(), field_sizes, - fragment, + nonconst, [](unsigned d) { return "d" + std::to_string(d + 1); }, [](unsigned a) { return "a" + std::to_string(a + 1); }); From 7a1e6e63e3943c6d568dc55cdf9674c349f1b305 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Thu, 7 Aug 2025 19:09:24 -0400 Subject: [PATCH 12/53] Add unordered 1D test --- .../unit-fragment-info-global-order-bounds.cc | 35 ++++++++++++++----- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index e7d9b70afb2..50c397b444b 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -179,7 +179,7 @@ std::vector, CoordsTuple>>> instance( { Array forwrite(ctx, array_uri, TILEDB_WRITE); for (const auto& fragment : fragments) { - templates::query::write_fragment(fragment, forwrite); + templates::query::write_fragment(fragment, forwrite, layout); } } @@ -250,15 +250,9 @@ TEST_CASE( return out_bounds; }; - SECTION("Example 1") { + SECTION("Ascending fragment") { Fragment f; f.resize(64); - std::iota(f.dimension().begin(), f.dimension().end(), 1); - - const auto fragment_bounds = - instance( - vfs_test_setup.ctx(), array_uri, std::vector{f}); - REQUIRE(fragment_bounds.size() == 1); const auto expect = make_expect({ {1, 8}, @@ -270,6 +264,29 @@ TEST_CASE( {49, 56}, {57, 64}, }); - CHECK(fragment_bounds[0] == expect); + + SECTION("Global Order") { + std::iota(f.dimension().begin(), 
f.dimension().end(), 1); + const auto fragment_bounds = + instance( + vfs_test_setup.ctx(), array_uri, std::vector{f}); + REQUIRE(fragment_bounds.size() == 1); + CHECK(fragment_bounds[0] == expect); + } + + SECTION("Unordered") { + for (uint64_t i = 0; i < f.size(); i++) { + f.dimension()[i] = f.size() - i; + } + + const auto fragment_bounds = + instance( + vfs_test_setup.ctx(), + array_uri, + std::vector{f}, + TILEDB_UNORDERED); + REQUIRE(fragment_bounds.size() == 1); + CHECK(fragment_bounds[0] == expect); + } } } From b6395726c0639945e597c5107c72b8fc1add858a Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Fri, 8 Aug 2025 14:15:17 -0400 Subject: [PATCH 13/53] Rapidcheck runs, prints minimum --- .../unit-fragment-info-global-order-bounds.cc | 155 +++++++++++++++++- test/support/rapidcheck/array_templates.h | 37 ++++- test/support/rapidcheck/show.cc | 29 ++++ 3 files changed, 209 insertions(+), 12 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 50c397b444b..f85b1f0f2db 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -34,12 +34,30 @@ #include "tiledb/api/c_api/array_schema/array_schema_api_internal.h" #include "tiledb/sm/misc/comparators.h" +#include +#include #include #include using namespace tiledb; using namespace tiledb::test; +using Fragment1DFixed = templates::Fragment1D; + +void showValue(const Fragment1DFixed& value, std::ostream& os) { + rc::showFragment(value, os); +} + +namespace rc::detail { +template +struct ShowDefault { + static void show(const Fragment1DFixed& value, std::ostream& os) { + rc::showFragment(value, os); + } +}; + +} // namespace rc::detail + /** * @return another fragment containing the contents of the argument sorted in * global order based on `array.schema()`. @@ -108,11 +126,14 @@ static void prepare_bound_buffers( bufs); } +template +using Bounds = std::pair, CoordsTuple>; + /** * @return the lower and upper bounds of tile `(f, t)` in the fragment info */ template -static std::pair, CoordsTuple> global_order_bounds( +static Bounds global_order_bounds( const FragmentInfo& finfo, uint64_t fragment, uint64_t tile) { constexpr size_t num_fields = std::tuple_size>::value; @@ -167,11 +188,16 @@ static std::pair, CoordsTuple> global_order_bounds( * Asserts that when a set of fragments are written, the fragment metadata * accurately reflects the expected global order bounds of the input. 
* + * "Accurately reflects" means that: + * 1) the lower bound is indeed the first coordinate in global order in the + * fragment 2) the upper bound is indeed the last coordinate in global order in + * the fragment + * * @return the global order bounds for each tile per fragment */ template -std::vector, CoordsTuple>>> instance( - Context ctx, +std::vector>> instance( + const Context& ctx, const std::string& array_uri, const std::vector& fragments, tiledb_layout_t layout = TILEDB_GLOBAL_ORDER) { @@ -226,7 +252,10 @@ TEST_CASE( "fragment_metadata_global_order_bounds_1d_fixed"); const templates::Dimension dimension( - templates::Domain(1, 1024), 16); + templates::Domain(0, 1024 * 8), 16); + + const bool allow_dups = GENERATE(true, false); + templates::ddl::create_array( array_uri, vfs_test_setup.ctx(), @@ -235,7 +264,10 @@ TEST_CASE( TILEDB_ROW_MAJOR, TILEDB_ROW_MAJOR, 8, - false); + allow_dups); + + DeleteArrayGuard delarray( + vfs_test_setup.ctx().ptr().get(), array_uri.c_str()); using Fragment = templates::Fragment1D; @@ -250,6 +282,30 @@ TEST_CASE( return out_bounds; }; + SECTION("Minimum write") { + Fragment f; + f.resize(1); + f.dimension()[0] = 1; + + std::vector>> fragment_bounds; + SECTION("Global Order") { + fragment_bounds = instance( + vfs_test_setup.ctx(), array_uri, std::vector{f}); + } + + SECTION("Unordered") { + fragment_bounds = instance( + vfs_test_setup.ctx(), + array_uri, + std::vector{f}, + TILEDB_UNORDERED); + } + REQUIRE(fragment_bounds.size() == 1); + CHECK( + fragment_bounds[0] == std::vector>{std::make_pair( + std::make_tuple(1), std::make_tuple(1))}); + } + SECTION("Ascending fragment") { Fragment f; f.resize(64); @@ -268,8 +324,8 @@ TEST_CASE( SECTION("Global Order") { std::iota(f.dimension().begin(), f.dimension().end(), 1); const auto fragment_bounds = - instance( - vfs_test_setup.ctx(), array_uri, std::vector{f}); + instance( + vfs_test_setup.ctx(), array_uri, std::vector{f}); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect); } @@ -280,13 +336,94 @@ TEST_CASE( } const auto fragment_bounds = - instance( + instance( vfs_test_setup.ctx(), array_uri, - std::vector{f}, + std::vector{f}, TILEDB_UNORDERED); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect); } } + + if (allow_dups) { + SECTION("Duplicates") { + Fragment f; + f.dimension() = {0, 0, 0, 0, 0, 0, 0, 0, 1}; + + const auto expect = make_expect({{0, 1}}); + + SECTION("Global Order") { + const auto fragment_bounds = + instance( + vfs_test_setup.ctx(), + array_uri, + std::vector{f}); + REQUIRE(fragment_bounds.size() == 1); + CHECK(fragment_bounds[0] == expect); + } + } + } +} + +TEST_CASE( + "Fragment metadata global order bounds: 1D fixed rapidcheck", + "[fragment_info][global-order][rapidcheck]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_1d_fixed"); + + static constexpr uint64_t LB = 0; + static constexpr uint64_t UB = 1024 * 8; + const templates::Domain domain(LB, UB); + const templates::Dimension dimension(domain, 16); + + Context ctx = vfs_test_setup.ctx(); + + auto temp_array = [&](bool allow_dups) { + templates::ddl::create_array( + array_uri, + ctx, + std::tuple&>{dimension}, + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + allow_dups); + + return DeleteArrayGuard(ctx.ptr().get(), array_uri.c_str()); + }; + + rc::prop("global order", [&](bool allow_dups) { + auto fragments = *rc::gen::container>( + rc::make_fragment_1d(allow_dups, domain)); + auto 
arrayguard = temp_array(allow_dups); + Array forread(ctx, array_uri, TILEDB_READ); + std::vector global_order_fragments; + for (const auto& fragment : fragments) { + global_order_fragments.push_back(make_global_order( + forread, fragment, TILEDB_UNORDERED)); + } + + instance( + vfs_test_setup.ctx(), + array_uri, + global_order_fragments, + TILEDB_GLOBAL_ORDER); + }); + + /* + rc::prop( + "unordered", + [&](std::vector> fragments) { + auto arrayguard = temp_array(); + Array forread(ctx, array_uri, TILEDB_READ); + std::vector fs; + for (auto fragment : fragments) { + fs.push_back(std::move(fragment.f_)); + } + instance( + vfs_test_setup.ctx(), array_uri, fs, TILEDB_UNORDERED); + }); + */ } diff --git a/test/support/rapidcheck/array_templates.h b/test/support/rapidcheck/array_templates.h index bfcd84f3fef..beec67e8cd2 100644 --- a/test/support/rapidcheck/array_templates.h +++ b/test/support/rapidcheck/array_templates.h @@ -237,9 +237,40 @@ Gen> make_fragment_2d( }); } -template <> -void show>(const templates::Domain& domain, std::ostream& os) { - os << "[" << domain.lower_bound << ", " << domain.upper_bound << "]"; +void showValue(const templates::Domain& domain, std::ostream& os); +void showValue(const templates::Domain& domain, std::ostream& os); +void showValue(const templates::Domain& domain, std::ostream& os); + +namespace detail { + +template +struct ShowDefault, A, B> { + static void show(const query_buffers& value, std::ostream& os) { + ::rc::show(value.values_, os); + } +}; + +} // namespace detail + +template +void showFragment( + const templates::Fragment& value, + std::ostream& os) { + auto showField = [&](const query_buffers& field) { + os << "\t\t"; + show(field, os); + os << std::endl; + }; + os << "{" << std::endl << "\t\"dimensions\": [" << std::endl; + std::apply( + [&](const auto&... dimension) { (showField(dimension), ...); }, + value.dimensions()); + os << "\t]" << std::endl; + os << "\t\"attributes\": [" << std::endl; + std::apply( + [&](const auto&... attribute) { (showField(attribute), ...); }, + value.attributes()); + os << "\t]" << std::endl << "}" << std::endl; } } // namespace rc diff --git a/test/support/rapidcheck/show.cc b/test/support/rapidcheck/show.cc index f3aeb2426db..4853e8ed3ff 100644 --- a/test/support/rapidcheck/show.cc +++ b/test/support/rapidcheck/show.cc @@ -32,6 +32,7 @@ * header files. 
*/ +#include #include #include "tiledb/sm/enums/query_condition_op.h" @@ -39,6 +40,26 @@ namespace rc::detail { +template +void showDomain(const templates::Domain& domain, std::ostream& os) { + os << "[" << domain.lower_bound << ", " << domain.upper_bound << "]"; +} + +void showValue(const templates::Domain& domain, std::ostream& os) { + showDomain(domain, os); +} +void showValue(const templates::Domain& domain, std::ostream& os) { + showDomain(domain, os); +} +void showValue(const templates::Domain& domain, std::ostream& os) { + showDomain(domain, os); +} + +template +void showQueryBuffers(const templates::query_buffers& qb, std::ostream& os) { + show(qb.values_, os); +} + void showValue(const tiledb::sm::ASTNode& node, std::ostream& os) { const tiledb::sm::ASTNodeVal* valnode = static_cast(&node); @@ -77,3 +98,11 @@ void showValue(const tiledb::sm::ASTNode& node, std::ostream& os) { } } // namespace rc::detail + +namespace rc { + +void showValue(const templates::query_buffers& qb, std::ostream& os) { + detail::showQueryBuffers(qb, os); +} + +} // namespace rc From 9c900b3f5516d9c67d570a1ff79e4f666ea43472 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Fri, 8 Aug 2025 15:16:48 -0400 Subject: [PATCH 14/53] Add RestProfile::file_exists to avoid throwing exception for failing to load default profile in ordinary execution --- tiledb/sm/config/config.cc | 10 +++++++--- tiledb/sm/rest/rest_profile.cc | 6 +++++- tiledb/sm/rest/rest_profile.h | 5 +++++ 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/tiledb/sm/config/config.cc b/tiledb/sm/config/config.cc index d32007c00f2..b999b9940b8 100644 --- a/tiledb/sm/config/config.cc +++ b/tiledb/sm/config/config.cc @@ -948,15 +948,19 @@ const char* Config::get_from_profile( std::optional profile_dir = found_dir ? std::make_optional(profile_dir_cstr) : std::nullopt; + const bool isNonDefaultProfile = + ((profile_name.has_value() && !profile_name.value().empty()) || + (profile_dir.has_value() && !profile_dir.value().empty())); try { // Create a Profile object and load the profile rest_profile_ = RestProfile(profile_name, profile_dir); - rest_profile_.value().load_from_file(); + if (rest_profile_.value().file_exists() || isNonDefaultProfile) { + rest_profile_.value().load_from_file(); + } } catch (const std::exception&) { // Throw an exception if the user has specified profile-related // parameters but the profile could not be loaded. - if ((profile_name.has_value() && !profile_name.value().empty()) || - (profile_dir.has_value() && !profile_dir.value().empty())) { + if (isNonDefaultProfile) { throw ConfigException( "Failed to load the REST profile. " "Please check the profile name and directory parameters."); diff --git a/tiledb/sm/rest/rest_profile.cc b/tiledb/sm/rest/rest_profile.cc index 206a9b10beb..d0e310ed075 100644 --- a/tiledb/sm/rest/rest_profile.cc +++ b/tiledb/sm/rest/rest_profile.cc @@ -226,8 +226,12 @@ void RestProfile::save_to_file(const bool overwrite) { write_file(data, filepath_); } +bool RestProfile::file_exists() const { + return std::filesystem::exists(filepath_); +} + void RestProfile::load_from_file() { - if (std::filesystem::exists(filepath_)) { + if (file_exists()) { // If the local file exists, load the profile with the given name. 
load_from_json_file(filepath_); } else { diff --git a/tiledb/sm/rest/rest_profile.h b/tiledb/sm/rest/rest_profile.h index 21b988c063b..28fd70afa14 100644 --- a/tiledb/sm/rest/rest_profile.h +++ b/tiledb/sm/rest/rest_profile.h @@ -128,6 +128,11 @@ class RestProfile { return param_values_; } + /** + * @return true if there is a regular file at the file path expected for this + */ + bool file_exists() const; + /** * Saves this profile to the local file. * From 99e04db9c7c75eeef20b9d0522e5b667625e1400 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Mon, 11 Aug 2025 09:49:31 -0400 Subject: [PATCH 15/53] Fix offset by tile_index_base_ --- tiledb/sm/fragment/fragment_metadata.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tiledb/sm/fragment/fragment_metadata.cc b/tiledb/sm/fragment/fragment_metadata.cc index 64f4dc20a3c..3094c6a4744 100644 --- a/tiledb/sm/fragment/fragment_metadata.cc +++ b/tiledb/sm/fragment/fragment_metadata.cc @@ -345,7 +345,9 @@ void FragmentMetadata::convert_tile_min_max_var_sizes_to_offsets( } void FragmentMetadata::set_tile_global_order_bounds_fixed( - const std::string& dim_name, uint64_t tile, const WriterTileTuple& data) { + const std::string& dim_name, + uint64_t which_tile, + const WriterTileTuple& data) { iassert(data.cell_num() > 0); const auto dim = array_schema_->domain().get_dimension_index(dim_name); @@ -356,6 +358,8 @@ void FragmentMetadata::set_tile_global_order_bounds_fixed( const auto& tile_max = data.global_order_max(); iassert(tile_max.has_value()); + const uint64_t tile = which_tile + tile_index_base_; + if (array_schema_->domain().dimensions()[dim]->var_size()) { // NB: for now we set a length, and it will be updated to an offset // via `convert_tile_global_order_bounds_sizes_to_offsets`, From 83e2b3692aa21f46619f8218faf1af43e9e3e3b3 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Mon, 11 Aug 2025 10:10:15 -0400 Subject: [PATCH 16/53] Rapidcheck passes for fixed 1D no dups --- .../unit-fragment-info-global-order-bounds.cc | 29 +++++++++---------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index f85b1f0f2db..b7aef2fd463 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -254,7 +254,7 @@ TEST_CASE( const templates::Dimension dimension( templates::Domain(0, 1024 * 8), 16); - const bool allow_dups = GENERATE(true, false); + const bool allow_dups = false; // FIXME: allow_dups = true bug templates::ddl::create_array( array_uri, @@ -394,7 +394,8 @@ TEST_CASE( return DeleteArrayGuard(ctx.ptr().get(), array_uri.c_str()); }; - rc::prop("global order", [&](bool allow_dups) { + rc::prop("global order", [&]() { + const bool allow_dups = false; // FIXME: not working correctly auto fragments = *rc::gen::container>( rc::make_fragment_1d(allow_dups, domain)); auto arrayguard = temp_array(allow_dups); @@ -412,18 +413,14 @@ TEST_CASE( TILEDB_GLOBAL_ORDER); }); - /* - rc::prop( - "unordered", - [&](std::vector> fragments) { - auto arrayguard = temp_array(); - Array forread(ctx, array_uri, TILEDB_READ); - std::vector fs; - for (auto fragment : fragments) { - fs.push_back(std::move(fragment.f_)); - } - instance( - vfs_test_setup.ctx(), array_uri, fs, TILEDB_UNORDERED); - }); - */ + rc::prop("unordered", [&]() { + const bool allow_dups = false; // FIXME: not working correctly + auto fragments = *rc::gen::container>( + rc::make_fragment_1d(allow_dups, 
domain)); + auto arrayguard = temp_array(allow_dups); + Array forread(ctx, array_uri, TILEDB_READ); + + instance( + vfs_test_setup.ctx(), array_uri, fragments, TILEDB_UNORDERED); + }); } From 4947b3ade99c21da15e71ce1fd31b869ebf3cae1 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Mon, 11 Aug 2025 10:21:15 -0400 Subject: [PATCH 17/53] Turn on dups --- .../unit-fragment-info-global-order-bounds.cc | 30 ++++++++++++++----- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index b7aef2fd463..1e9f050dc86 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -254,7 +254,7 @@ TEST_CASE( const templates::Dimension dimension( templates::Domain(0, 1024 * 8), 16); - const bool allow_dups = false; // FIXME: allow_dups = true bug + const bool allow_dups = GENERATE(true, false); templates::ddl::create_array( array_uri, @@ -348,17 +348,32 @@ TEST_CASE( if (allow_dups) { SECTION("Duplicates") { - Fragment f; - f.dimension() = {0, 0, 0, 0, 0, 0, 0, 0, 1}; - - const auto expect = make_expect({{0, 1}}); + const auto expect = make_expect({{0, 0}, {1, 1}}); SECTION("Global Order") { + Fragment f; + f.dimension() = {0, 0, 0, 0, 0, 0, 0, 0, 1}; + + const auto fragment_bounds = + instance( + vfs_test_setup.ctx(), + array_uri, + std::vector{f}, + TILEDB_GLOBAL_ORDER); + REQUIRE(fragment_bounds.size() == 1); + CHECK(fragment_bounds[0] == expect); + } + + SECTION("Unordered") { + Fragment f; + f.dimension() = {0, 0, 0, 1, 0, 0, 0, 0, 0}; + const auto fragment_bounds = instance( vfs_test_setup.ctx(), array_uri, - std::vector{f}); + std::vector{f}, + TILEDB_UNORDERED); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect); } @@ -394,8 +409,7 @@ TEST_CASE( return DeleteArrayGuard(ctx.ptr().get(), array_uri.c_str()); }; - rc::prop("global order", [&]() { - const bool allow_dups = false; // FIXME: not working correctly + rc::prop("global order", [&](bool allow_dups) { auto fragments = *rc::gen::container>( rc::make_fragment_1d(allow_dups, domain)); auto arrayguard = temp_array(allow_dups); From f9b93fb42367b58b48637f81eaa6fce6513234c2 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Thu, 14 Aug 2025 17:54:15 -0400 Subject: [PATCH 18/53] Fix Fragment2d::d2 --- test/support/src/array_templates.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/support/src/array_templates.h b/test/support/src/array_templates.h index f02ab263418..50dade68e19 100644 --- a/test/support/src/array_templates.h +++ b/test/support/src/array_templates.h @@ -1278,7 +1278,7 @@ struct Fragment2D : public Fragment, std::tuple> { } query_buffers& d2() { - return std::get<0>(this->dimensions()); + return std::get<1>(this->dimensions()); } }; From b997f22b4547500c941447b74f5e0386b69116a2 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Thu, 14 Aug 2025 17:54:55 -0400 Subject: [PATCH 19/53] Test Fragment metadata global order bounds: 2D fixed unordered section --- .../unit-fragment-info-global-order-bounds.cc | 241 +++++++++++++++++- 1 file changed, 229 insertions(+), 12 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 1e9f050dc86..2f8f59246ab 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -43,11 +43,16 @@ using namespace tiledb; using namespace tiledb::test; using 
Fragment1DFixed = templates::Fragment1D; +using Fragment2DFixed = templates::Fragment2D; void showValue(const Fragment1DFixed& value, std::ostream& os) { rc::showFragment(value, os); } +void showValue(const Fragment2DFixed& value, std::ostream& os) { + rc::showFragment(value, os); +} + namespace rc::detail { template struct ShowDefault { @@ -64,8 +69,8 @@ struct ShowDefault { */ template static F make_global_order( - const Array& array, const F& fragment, tiledb_layout_t layout) { - if (layout == TILEDB_GLOBAL_ORDER) { + const Array& array, const F& fragment, sm::Layout layout) { + if (layout == sm::Layout::GLOBAL_ORDER) { return fragment; } @@ -200,12 +205,13 @@ std::vector>> instance( const Context& ctx, const std::string& array_uri, const std::vector& fragments, - tiledb_layout_t layout = TILEDB_GLOBAL_ORDER) { + sm::Layout layout = sm::Layout::GLOBAL_ORDER) { // write each fragment { Array forwrite(ctx, array_uri, TILEDB_WRITE); for (const auto& fragment : fragments) { - templates::query::write_fragment(fragment, forwrite, layout); + templates::query::write_fragment( + fragment, forwrite, static_cast(layout)); } } @@ -298,7 +304,7 @@ TEST_CASE( vfs_test_setup.ctx(), array_uri, std::vector{f}, - TILEDB_UNORDERED); + sm::Layout::UNORDERED); } REQUIRE(fragment_bounds.size() == 1); CHECK( @@ -340,7 +346,7 @@ TEST_CASE( vfs_test_setup.ctx(), array_uri, std::vector{f}, - TILEDB_UNORDERED); + sm::Layout::UNORDERED); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect); } @@ -359,7 +365,7 @@ TEST_CASE( vfs_test_setup.ctx(), array_uri, std::vector{f}, - TILEDB_GLOBAL_ORDER); + sm::Layout::GLOBAL_ORDER); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect); } @@ -373,7 +379,7 @@ TEST_CASE( vfs_test_setup.ctx(), array_uri, std::vector{f}, - TILEDB_UNORDERED); + sm::Layout::UNORDERED); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect); } @@ -386,7 +392,7 @@ TEST_CASE( "[fragment_info][global-order][rapidcheck]") { VFSTestSetup vfs_test_setup; const auto array_uri = vfs_test_setup.array_uri( - "fragment_metadata_global_order_bounds_1d_fixed"); + "fragment_metadata_global_order_bounds_1d_fixed_rapidcheck"); static constexpr uint64_t LB = 0; static constexpr uint64_t UB = 1024 * 8; @@ -417,14 +423,14 @@ TEST_CASE( std::vector global_order_fragments; for (const auto& fragment : fragments) { global_order_fragments.push_back(make_global_order( - forread, fragment, TILEDB_UNORDERED)); + forread, fragment, sm::Layout::UNORDERED)); } instance( vfs_test_setup.ctx(), array_uri, global_order_fragments, - TILEDB_GLOBAL_ORDER); + sm::Layout::GLOBAL_ORDER); }); rc::prop("unordered", [&]() { @@ -435,6 +441,217 @@ TEST_CASE( Array forread(ctx, array_uri, TILEDB_READ); instance( - vfs_test_setup.ctx(), array_uri, fragments, TILEDB_UNORDERED); + vfs_test_setup.ctx(), array_uri, fragments, sm::Layout::UNORDERED); }); } + +TEST_CASE( + "Fragment metadata global order bounds: 2D fixed", + "[fragment_info][global-order]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_2d_fixed"); + + const bool allow_dups = false; + + const templates::Dimension d1( + templates::Domain(-256, 256), 4); + const templates::Dimension d2( + templates::Domain(-256, 256), 4); + + templates::ddl::create_array( + array_uri, + vfs_test_setup.ctx(), + std::tie(d1, d2), + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + allow_dups); + + DeleteArrayGuard delarray( + 
vfs_test_setup.ctx().ptr().get(), array_uri.c_str()); + + using Fragment = templates::Fragment2D; + using TileBounds = + std::pair, std::tuple>; + + Fragment minimum; + { + minimum.resize(1); + minimum.d1()[0] = 0; + minimum.d2()[0] = 0; + } + + constexpr size_t row_num_cells = 64, col_num_cells = 64, + square_num_cells = 64; + + Fragment row, col, square, square_offset; + + row.resize(row_num_cells); + col.resize(col_num_cells); + square.resize(square_num_cells); + square_offset.resize(square_num_cells); + + const sm::Layout layout = + GENERATE(sm::Layout::UNORDERED, sm::Layout::GLOBAL_ORDER); + + if (layout == sm::Layout::UNORDERED) { + for (uint64_t i = 0; i < row_num_cells; i++) { + row.d1()[i] = 0; + row.d2()[i] = i; + } + + for (uint64_t i = 0; i < col_num_cells; i++) { + col.d1()[i] = i; + col.d2()[i] = 0; + } + + const uint64_t square_row_length = std::sqrt(square_num_cells); + for (uint64_t i = 0; i < square_num_cells; i++) { + square.d1()[i] = i / square_row_length; + square.d2()[i] = i % square_row_length; + } + for (uint64_t i = 0; i < square_num_cells; i++) { + square_offset.d1()[i] = 2 + (i / square_row_length); + square_offset.d2()[i] = 2 + (i % square_row_length); + } + } else { + SKIP("TODO"); + // TODO + } + + const std::vector expect_row_bounds = { + {{0, 0}, {0, 7}}, + {{0, 8}, {0, 15}}, + {{0, 16}, {0, 23}}, + {{0, 24}, {0, 31}}, + {{0, 32}, {0, 39}}, + {{0, 40}, {0, 47}}, + {{0, 48}, {0, 55}}, + {{0, 56}, {0, 63}}}; + + const std::vector expect_col_bounds = { + {{0, 0}, {7, 0}}, + {{8, 0}, {15, 0}}, + {{16, 0}, {23, 0}}, + {{24, 0}, {31, 0}}, + {{32, 0}, {39, 0}}, + {{40, 0}, {47, 0}}, + {{48, 0}, {55, 0}}, + {{56, 0}, {63, 0}}}; + + const std::vector expect_square_bounds = { + {{0, 0}, {1, 3}}, + {{2, 0}, {3, 3}}, + {{0, 4}, {1, 7}}, + {{2, 4}, {3, 7}}, + {{4, 0}, {5, 3}}, + {{6, 0}, {7, 3}}, + {{4, 4}, {5, 7}}, + {{6, 4}, {7, 7}}, + }; + + const std::vector expect_square_offset_bounds = { + {{2, 2}, {2, 7}}, + {{3, 4}, {3, 9}}, + {{4, 2}, {7, 3}}, + {{4, 4}, {5, 7}}, + {{6, 4}, {7, 7}}, + {{4, 8}, {7, 9}}, + {{8, 2}, {8, 7}}, + {{9, 4}, {9, 9}}}; + + SECTION("Minimum write") { + Fragment f; + f.resize(1); + f.d1()[0] = 0; + f.d2()[0] = 0; + + std::vector>> fragment_bounds; + SECTION("Global Order") { + fragment_bounds = instance( + vfs_test_setup.ctx(), array_uri, std::vector{f}); + } + + SECTION("Unordered") { + fragment_bounds = instance( + vfs_test_setup.ctx(), + array_uri, + std::vector{f}, + sm::Layout::UNORDERED); + } + + std::vector expect_bounds = {{{0, 0}, {0, 0}}}; + REQUIRE(fragment_bounds.size() == 1); + CHECK(fragment_bounds[0] == expect_bounds); + } + + DYNAMIC_SECTION("Row (layout = " + sm::layout_str(layout) + ")") { + const auto fragment_bounds = + instance( + vfs_test_setup.ctx(), + array_uri, + std::vector{row}, + layout); + REQUIRE(fragment_bounds.size() == 1); + CHECK(fragment_bounds[0] == expect_row_bounds); + } + + DYNAMIC_SECTION("Column (layout = " + sm::layout_str(layout) + ")") { + const auto fragment_bounds = + instance( + vfs_test_setup.ctx(), + array_uri, + std::vector{col}, + layout); + REQUIRE(fragment_bounds.size() == 1); + CHECK(fragment_bounds[0] == expect_col_bounds); + } + + DYNAMIC_SECTION("Square (layout = " + sm::layout_str(layout) + ")") { + const auto fragment_bounds = + instance( + vfs_test_setup.ctx(), + array_uri, + std::vector{square}, + layout); + REQUIRE(fragment_bounds.size() == 1); + CHECK(fragment_bounds[0] == expect_square_bounds); + } + + DYNAMIC_SECTION("Square offset (layout = " + sm::layout_str(layout) + ")") { 
+ const auto fragment_bounds = + instance( + vfs_test_setup.ctx(), + array_uri, + std::vector{square_offset}, + layout); + REQUIRE(fragment_bounds.size() == 1); + CHECK(fragment_bounds[0] == expect_square_offset_bounds); + } + + DYNAMIC_SECTION("Multi-fragment (layout = " + sm::layout_str(layout) + ")") { + const std::vector fragments = {col, square_offset, row, square}; + const auto fragment_bounds = + instance( + vfs_test_setup.ctx(), array_uri, fragments, layout); + CHECK(fragment_bounds.size() >= 1); + if (fragment_bounds.size() >= 1) { + CHECK(fragment_bounds[0] == expect_col_bounds); + } + CHECK(fragment_bounds.size() >= 2); + if (fragment_bounds.size() >= 2) { + CHECK(fragment_bounds[1] == expect_square_offset_bounds); + } + CHECK(fragment_bounds.size() >= 3); + if (fragment_bounds.size() >= 3) { + CHECK(fragment_bounds[2] == expect_row_bounds); + } + CHECK(fragment_bounds.size() >= 4); + if (fragment_bounds.size() >= 4) { + CHECK(fragment_bounds[3] == expect_square_bounds); + } + REQUIRE(fragment_bounds.size() == 4); + } +} From 52672dcfb1b64de042483f4f0536cea6c30583a5 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Mon, 15 Sep 2025 10:15:45 -0400 Subject: [PATCH 20/53] Test Fragment metadata global order bounds: 2D fixed global order section --- .../unit-fragment-info-global-order-bounds.cc | 112 ++++++++++-------- 1 file changed, 65 insertions(+), 47 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 2f8f59246ab..d66f7e527a3 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -78,7 +78,8 @@ static F make_global_order( std::iota(idxs.begin(), idxs.end(), 0); // sort in global order - sm::GlobalCellCmp globalcmp(array.schema().ptr()->array_schema()->domain()); + auto array_schema = array.schema().ptr()->array_schema(); + sm::GlobalCellCmp globalcmp(array_schema->domain()); auto icmp = [&](uint64_t ia, uint64_t ib) -> bool { return std::apply( @@ -459,9 +460,11 @@ TEST_CASE( const templates::Dimension d2( templates::Domain(-256, 256), 4); + Context ctx = vfs_test_setup.ctx(); + templates::ddl::create_array( array_uri, - vfs_test_setup.ctx(), + ctx, std::tie(d1, d2), std::vector>{}, TILEDB_ROW_MAJOR, @@ -469,8 +472,7 @@ TEST_CASE( 8, allow_dups); - DeleteArrayGuard delarray( - vfs_test_setup.ctx().ptr().get(), array_uri.c_str()); + DeleteArrayGuard delarray(ctx.ptr().get(), array_uri.c_str()); using Fragment = templates::Fragment2D; using TileBounds = @@ -496,29 +498,60 @@ TEST_CASE( const sm::Layout layout = GENERATE(sm::Layout::UNORDERED, sm::Layout::GLOBAL_ORDER); - if (layout == sm::Layout::UNORDERED) { - for (uint64_t i = 0; i < row_num_cells; i++) { - row.d1()[i] = 0; - row.d2()[i] = i; - } + for (uint64_t i = 0; i < row_num_cells; i++) { + row.d1()[i] = 0; + row.d2()[i] = i; + } - for (uint64_t i = 0; i < col_num_cells; i++) { - col.d1()[i] = i; - col.d2()[i] = 0; - } + for (uint64_t i = 0; i < col_num_cells; i++) { + col.d1()[i] = i; + col.d2()[i] = 0; + } - const uint64_t square_row_length = std::sqrt(square_num_cells); - for (uint64_t i = 0; i < square_num_cells; i++) { - square.d1()[i] = i / square_row_length; - square.d2()[i] = i % square_row_length; - } - for (uint64_t i = 0; i < square_num_cells; i++) { - square_offset.d1()[i] = 2 + (i / square_row_length); - square_offset.d2()[i] = 2 + (i % square_row_length); - } - } else { - SKIP("TODO"); - // TODO + const uint64_t square_row_length = 
std::sqrt(square_num_cells); + for (uint64_t i = 0; i < square_num_cells; i++) { + square.d1()[i] = i / square_row_length; + square.d2()[i] = i % square_row_length; + } + for (uint64_t i = 0; i < square_num_cells; i++) { + square_offset.d1()[i] = 2 + (i / square_row_length); + square_offset.d2()[i] = 2 + (i % square_row_length); + } + + if (layout == sm::Layout::GLOBAL_ORDER) { + Array forread(ctx, array_uri, TILEDB_READ); + + // row, col are in global order already + square = make_global_order(forread, square, tiledb::sm::Layout::UNORDERED); + REQUIRE( + square.d1() == std::vector{ + 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, + 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, + 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, + }); + REQUIRE( + square.d2() == std::vector{ + 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, + 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, + 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, + 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, + }); + + square_offset = make_global_order( + forread, square_offset, tiledb::sm::Layout::UNORDERED); + REQUIRE( + square_offset.d1().values_ == + std::vector{2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 4, 4, 4, 4, 5, 5, 5, 5, + 6, 6, 6, 6, 7, 7, 7, 7, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 9, 9, 8, 8, 8, 8, 9, 9, 9, 9, 8, 8, 9, 9}); + REQUIRE( + square_offset.d2().values_ == + std::vector{2, 3, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 8, 9, 8, 9, + 2, 3, 2, 3, 2, 3, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, + 4, 5, 6, 7, 4, 5, 6, 7, 8, 9, 8, 9, 8, 9, 8, 9, + 2, 3, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 8, 9, 8, 9}); } const std::vector expect_row_bounds = { @@ -571,15 +604,12 @@ TEST_CASE( std::vector>> fragment_bounds; SECTION("Global Order") { fragment_bounds = instance( - vfs_test_setup.ctx(), array_uri, std::vector{f}); + ctx, array_uri, std::vector{f}); } SECTION("Unordered") { fragment_bounds = instance( - vfs_test_setup.ctx(), - array_uri, - std::vector{f}, - sm::Layout::UNORDERED); + ctx, array_uri, std::vector{f}, sm::Layout::UNORDERED); } std::vector expect_bounds = {{{0, 0}, {0, 0}}}; @@ -590,10 +620,7 @@ TEST_CASE( DYNAMIC_SECTION("Row (layout = " + sm::layout_str(layout) + ")") { const auto fragment_bounds = instance( - vfs_test_setup.ctx(), - array_uri, - std::vector{row}, - layout); + ctx, array_uri, std::vector{row}, layout); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect_row_bounds); } @@ -601,10 +628,7 @@ TEST_CASE( DYNAMIC_SECTION("Column (layout = " + sm::layout_str(layout) + ")") { const auto fragment_bounds = instance( - vfs_test_setup.ctx(), - array_uri, - std::vector{col}, - layout); + ctx, array_uri, std::vector{col}, layout); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect_col_bounds); } @@ -612,10 +636,7 @@ TEST_CASE( DYNAMIC_SECTION("Square (layout = " + sm::layout_str(layout) + ")") { const auto fragment_bounds = instance( - vfs_test_setup.ctx(), - array_uri, - std::vector{square}, - layout); + ctx, array_uri, std::vector{square}, layout); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect_square_bounds); } @@ -623,10 +644,7 @@ TEST_CASE( DYNAMIC_SECTION("Square offset (layout = " + sm::layout_str(layout) + ")") { const auto fragment_bounds = instance( - vfs_test_setup.ctx(), - array_uri, - std::vector{square_offset}, - layout); + ctx, array_uri, std::vector{square_offset}, layout); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect_square_offset_bounds); } 
@@ -635,7 +653,7 @@ TEST_CASE( const std::vector fragments = {col, square_offset, row, square}; const auto fragment_bounds = instance( - vfs_test_setup.ctx(), array_uri, fragments, layout); + ctx, array_uri, fragments, layout); CHECK(fragment_bounds.size() >= 1); if (fragment_bounds.size() >= 1) { CHECK(fragment_bounds[0] == expect_col_bounds); From 72cdd82c3a98855bbf43bc1419a109a49b3bd1e5 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Mon, 15 Sep 2025 17:49:05 -0400 Subject: [PATCH 21/53] Add 1D var test with trivial input --- .../unit-fragment-info-global-order-bounds.cc | 218 ++++++++++++++++-- test/support/src/array_templates.h | 35 ++- .../fragment_info_api_external.h | 3 + 3 files changed, 224 insertions(+), 32 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index d66f7e527a3..b0f15784de6 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -45,6 +45,8 @@ using namespace tiledb::test; using Fragment1DFixed = templates::Fragment1D; using Fragment2DFixed = templates::Fragment2D; +using Fragment1DVar = templates::Fragment1D>; + void showValue(const Fragment1DFixed& value, std::ostream& os) { rc::showFragment(value, os); } @@ -121,13 +123,53 @@ template using CoordsTuple = decltype(tuple_index( std::declval().dimensions(), std::declval())); +/** + * Stores pointers to the fields of `bufs` in `ptrs` for use + * with the fragment bound functions. + */ template static void prepare_bound_buffers( DimensionTuple& bufs, std::array& ptrs) { + auto prepare_bound_buffer = + [&](uint64_t i, templates::query_buffers& qbuf) { + if constexpr (std::is_same_v>) { + if (qbuf.values_.empty()) { + ptrs[i] = nullptr; + } else { + ptrs[i] = qbuf.values_.data(); + } + } else { + ptrs[i] = static_cast(&qbuf[0]); + } + }; + + uint64_t i = 0; + std::apply( + [&](templates::query_buffers&... qbufs) { + (prepare_bound_buffer(i++, qbufs), ...); + }, + bufs); +} + +/** + * Reserves space in the variable-length dimensions of `bufs` + * for global order bounds of the provided `sizes`. + */ +template +static void allocate_var_bound_buffers( + DimensionTuple& bufs, size_t sizes[]) { + auto allocate_var_bound_buffer = + [&](uint64_t i, templates::query_buffers& qbuf) { + if constexpr (std::is_same_v>) { + qbuf.values_.reserve(sizes[i]); + qbuf.offsets_ = {0}; + } + }; + uint64_t i = 0; std::apply( [&](templates::query_buffers&... 
qbufs) { - ([&]() { ptrs[i++] = static_cast(&qbufs[0]); }(), ...); + (allocate_var_bound_buffer(i++, qbufs), ...); }, bufs); } @@ -160,32 +202,50 @@ static Bounds global_order_bounds( size_t lb_sizes[num_fields]; std::array lb_dimensions; - prepare_bound_buffers(lb, lb_dimensions); size_t ub_sizes[num_fields]; std::array ub_dimensions; + + prepare_bound_buffers(lb, lb_dimensions); prepare_bound_buffers(ub, ub_dimensions); auto ctx_c = finfo.context().ptr().get(); - // FIXME: add C++ API - auto rc = tiledb_fragment_info_get_global_order_lower_bound( - ctx_c, - finfo.ptr().get(), - fragment, - tile, - &lb_sizes[0], - &lb_dimensions[0]); - throw_if_error(ctx_c, rc); - - rc = tiledb_fragment_info_get_global_order_upper_bound( - ctx_c, - finfo.ptr().get(), - fragment, - tile, - &ub_sizes[0], - &ub_dimensions[0]); - throw_if_error(ctx_c, rc); + auto call = [&]() { + // FIXME: add C++ API + auto rc = tiledb_fragment_info_get_global_order_lower_bound( + ctx_c, + finfo.ptr().get(), + fragment, + tile, + &lb_sizes[0], + &lb_dimensions[0]); + throw_if_error(ctx_c, rc); + + rc = tiledb_fragment_info_get_global_order_upper_bound( + ctx_c, + finfo.ptr().get(), + fragment, + tile, + &ub_sizes[0], + &ub_dimensions[0]); + throw_if_error(ctx_c, rc); + }; + + static constexpr bool has_var_dimension = std::apply( + [](const templates::query_buffers&...) { + return std::disjunction_v, Ts>...>; + }, + lb); + if constexpr (has_var_dimension) { + // determine length, then allocate, then call again + call(); + allocate_var_bound_buffers(lb, lb_sizes); + allocate_var_bound_buffers(ub, ub_sizes); + call(); + } else { + call(); + } return std::make_pair(tuple_index(lb, 0), tuple_index(ub, 0)); } @@ -673,3 +733,121 @@ TEST_CASE( REQUIRE(fragment_bounds.size() == 4); } } + +TEST_CASE( + "Fragment metadata global order bounds: 2D fixed rapidcheck", + "[fragment_info][global-order][rapidcheck]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_1d_fixed_rapidcheck"); + + static constexpr int32_t LB = -256; + static constexpr int32_t UB = 256; + + const templates::Domain domain(LB, UB); + const templates::Dimension d1(domain, 4); + const templates::Dimension d2(domain, 4); + + Context ctx = vfs_test_setup.ctx(); + + auto temp_array = [&](bool allow_dups) { + templates::ddl::create_array( + array_uri, + ctx, + std::tuple< + const templates::Dimension&, + const templates::Dimension&>{d1, d2}, + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + allow_dups); + + return DeleteArrayGuard(ctx.ptr().get(), array_uri.c_str()); + }; + + rc::prop("global order", [&](bool allow_dups) { + auto fragments = *rc::gen::container>( + rc::make_fragment_2d(allow_dups, domain, domain)); + auto arrayguard = temp_array(allow_dups); + Array forread(ctx, array_uri, TILEDB_READ); + std::vector global_order_fragments; + for (const auto& fragment : fragments) { + global_order_fragments.push_back(make_global_order( + forread, fragment, sm::Layout::UNORDERED)); + } + + instance( + vfs_test_setup.ctx(), + array_uri, + global_order_fragments, + sm::Layout::GLOBAL_ORDER); + }); + + rc::prop("unordered", [&]() { + const bool allow_dups = false; // FIXME: not working correctly + auto fragments = *rc::gen::container>( + rc::make_fragment_2d(allow_dups, domain, domain)); + auto arrayguard = temp_array(allow_dups); + Array forread(ctx, array_uri, TILEDB_READ); + + instance( + vfs_test_setup.ctx(), array_uri, fragments, sm::Layout::UNORDERED); + }); +} + +TEST_CASE( + 
"Fragment metadata global order bounds: 1D var", + "[fragment_info][global-order]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = + vfs_test_setup.array_uri("fragment_metadata_global_order_bounds_1d_var"); + + const bool allow_dups = GENERATE(true, false); + + const templates::Dimension dimension; + + templates::ddl::create_array( + array_uri, + vfs_test_setup.ctx(), + std::tuple&>{ + dimension}, + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + allow_dups); + + DeleteArrayGuard delarray( + vfs_test_setup.ctx().ptr().get(), array_uri.c_str()); + + using Fragment = Fragment1DVar; + + SECTION("Minimum write") { + const std::vector value = {'f', 'o', 'o'}; + + Fragment f; + f.resize(1); + f.dimension().push_back(value); + + std::vector>> fragment_bounds; + SECTION("Global Order") { + fragment_bounds = instance( + vfs_test_setup.ctx(), array_uri, std::vector{f}); + } + + SECTION("Unordered") { + fragment_bounds = instance( + vfs_test_setup.ctx(), + array_uri, + std::vector{f}, + sm::Layout::UNORDERED); + } + REQUIRE(fragment_bounds.size() == 1); + + CHECK( + fragment_bounds[0] == + std::vector>{ + std::make_pair(std::make_tuple(value), std::make_tuple(value))}); + } +} diff --git a/test/support/src/array_templates.h b/test/support/src/array_templates.h index 50dade68e19..cdbec503f69 100644 --- a/test/support/src/array_templates.h +++ b/test/support/src/array_templates.h @@ -108,11 +108,12 @@ struct query_buffers {}; * Constrains types which can be used as the physical type of a dimension. */ template -concept DimensionType = requires(const D& coord) { - typename std::is_signed; - { coord < coord } -> std::same_as; - { D(int64_t(coord)) } -> std::same_as; -}; +concept DimensionType = + std::is_same_v> or requires(const D& coord) { + typename std::is_signed; + { coord < coord } -> std::same_as; + { D(int64_t(coord)) } -> std::same_as; + }; /** * Constrains types which can be used as the physical type of an attribute. @@ -208,6 +209,11 @@ struct Dimension { value_type extent; }; +template <> +struct Dimension { + using value_type = std::vector; +}; + template struct static_attribute {}; @@ -427,7 +433,7 @@ struct query_buffers { std::vector values_; - query_buffers() { + constexpr query_buffers() { } query_buffers(const self_type& other) @@ -586,7 +592,7 @@ struct query_buffers> { std::vector values_; std::vector validity_; - query_buffers() { + constexpr query_buffers() { } query_buffers(const self_type& other) = default; @@ -813,7 +819,7 @@ struct query_buffers> { std::vector values_; std::vector offsets_; - query_buffers() { + constexpr query_buffers() { } query_buffers(const self_type& other) = default; @@ -976,7 +982,7 @@ struct query_buffers>> { std::vector offsets_; std::vector validity_; - query_buffers() { + constexpr query_buffers() { } query_buffers(const self_type& other) = default; @@ -1531,9 +1537,14 @@ void create_array( using CoordType = templates::Dimension::value_type; dimension_names.push_back("d" + std::to_string(dimension_names.size() + 1)); dimension_types.push_back(static_cast(D)); - dimension_ranges.push_back( - const_cast(&dimension.domain.lower_bound)); - dimension_extents.push_back(const_cast(&dimension.extent)); + if constexpr (std::is_same_v>) { + dimension_ranges.push_back(nullptr); + dimension_extents.push_back(nullptr); + } else { + dimension_ranges.push_back( + const_cast(&dimension.domain.lower_bound)); + dimension_extents.push_back(const_cast(&dimension.extent)); + } }; std::apply( [&](const templates::Dimension&... 
dimension) { diff --git a/tiledb/api/c_api/fragment_info/fragment_info_api_external.h b/tiledb/api/c_api/fragment_info/fragment_info_api_external.h index 548d45d08fe..438476388e4 100644 --- a/tiledb/api/c_api/fragment_info/fragment_info_api_external.h +++ b/tiledb/api/c_api/fragment_info/fragment_info_api_external.h @@ -733,6 +733,9 @@ TILEDB_EXPORT capi_return_t tiledb_fragment_info_get_mbr_var_from_name( * &dimensions[0]); * @endcode * + * If any dimension is `NULL` then only the size is returned. This enables + * asking for the size of the bounds of variable-length dimensions. + * * @param[in] ctx The TileDB context * @param[in] fragment_info The fragment info object. * @param[in] fragment_id The index of the fragment of interest From 23e9420b6d21d8cec3d76933cfafede64c788a2d Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 16 Sep 2025 14:33:26 -0400 Subject: [PATCH 22/53] Trivial 1D var test passes --- .../unit-fragment-info-global-order-bounds.cc | 54 +++++++++---------- tiledb/sm/fragment/fragment_info.cc | 14 +++-- tiledb/sm/fragment/fragment_metadata.cc | 31 ++++++----- tiledb/sm/tile/tile_metadata_generator.cc | 24 +++++++-- 4 files changed, 70 insertions(+), 53 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index b0f15784de6..49d1343d7ff 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -112,10 +112,22 @@ template using DimensionTuple = stdx::decay_tuple().dimensions())>; +template +T to_owned(T value) { + return value; +} + +template +std::vector to_owned(std::span value) { + return std::vector(value.begin(), value.end()); +} + template auto tuple_index(const T& tuple, uint64_t idx) { return std::apply( - [&](const auto&... field) { return std::make_tuple(field[idx]...); }, + [&](const auto&... field) { + return std::make_tuple(to_owned(field[idx])...); + }, tuple); } @@ -161,8 +173,10 @@ static void allocate_var_bound_buffers( auto allocate_var_bound_buffer = [&](uint64_t i, templates::query_buffers& qbuf) { if constexpr (std::is_same_v>) { - qbuf.values_.reserve(sizes[i]); + qbuf.values_.resize(sizes[i]); qbuf.offsets_ = {0}; + } else { + qbuf.resize(1); } }; @@ -189,16 +203,6 @@ static Bounds global_order_bounds( // Otherwise it is unsafe to call this API with variable-length dimensions DimensionTuple lb, ub; - std::apply( - [](templates::query_buffers&... field) { - (field.resize(1), ...); - }, - lb); - std::apply( - [](templates::query_buffers&... field) { - (field.resize(1), ...); - }, - ub); size_t lb_sizes[num_fields]; std::array lb_dimensions; @@ -206,12 +210,12 @@ static Bounds global_order_bounds( size_t ub_sizes[num_fields]; std::array ub_dimensions; - prepare_bound_buffers(lb, lb_dimensions); - prepare_bound_buffers(ub, ub_dimensions); - auto ctx_c = finfo.context().ptr().get(); auto call = [&]() { + prepare_bound_buffers(lb, lb_dimensions); + prepare_bound_buffers(ub, ub_dimensions); + // FIXME: add C++ API auto rc = tiledb_fragment_info_get_global_order_lower_bound( ctx_c, @@ -232,20 +236,11 @@ static Bounds global_order_bounds( throw_if_error(ctx_c, rc); }; - static constexpr bool has_var_dimension = std::apply( - [](const templates::query_buffers&...) 
{ - return std::disjunction_v, Ts>...>; - }, - lb); - if constexpr (has_var_dimension) { - // determine length, then allocate, then call again - call(); - allocate_var_bound_buffers(lb, lb_sizes); - allocate_var_bound_buffers(ub, ub_sizes); - call(); - } else { - call(); - } + // determine length, then allocate, then call again + call(); + allocate_var_bound_buffers(lb, lb_sizes); + allocate_var_bound_buffers(ub, ub_sizes); + call(); return std::make_pair(tuple_index(lb, 0), tuple_index(ub, 0)); } @@ -827,7 +822,6 @@ TEST_CASE( const std::vector value = {'f', 'o', 'o'}; Fragment f; - f.resize(1); f.dimension().push_back(value); std::vector>> fragment_bounds; diff --git a/tiledb/sm/fragment/fragment_info.cc b/tiledb/sm/fragment/fragment_info.cc index ade2666c469..84681b9aec8 100644 --- a/tiledb/sm/fragment/fragment_info.cc +++ b/tiledb/sm/fragment/fragment_info.cc @@ -751,8 +751,10 @@ static Status read_global_order_bound_to_user_buffers( dimension_sizes[d] = offsets[which_tile + 1] - offsets[which_tile]; } - const void* coord = &varPart[d][offsets[which_tile]]; - memcpy(dimensions[d], coord, dimension_sizes[d]); + if (dimensions[d]) { + const void* coord = &varPart[d][offsets[which_tile]]; + memcpy(dimensions[d], coord, dimension_sizes[d]); + } } else { const uint64_t dimFixedSize = ds[d]->cell_size(); if (dimFixedSize * which_tile >= fixedPart[d].size()) { @@ -760,8 +762,12 @@ static Status read_global_order_bound_to_user_buffers( "Cannot get MBR global order bound: Invalid mbr index"); } - const void* coord = &fixedPart[d].data()[which_tile * dimFixedSize]; - memcpy(dimensions[d], coord, dimFixedSize); + dimension_sizes[d] = dimFixedSize; + + if (dimensions[d]) { + const void* coord = &fixedPart[d].data()[which_tile * dimFixedSize]; + memcpy(dimensions[d], coord, dimFixedSize); + } } } diff --git a/tiledb/sm/fragment/fragment_metadata.cc b/tiledb/sm/fragment/fragment_metadata.cc index 3094c6a4744..161dc5846a8 100644 --- a/tiledb/sm/fragment/fragment_metadata.cc +++ b/tiledb/sm/fragment/fragment_metadata.cc @@ -390,6 +390,15 @@ void FragmentMetadata::set_tile_global_order_bounds_fixed( } } +/** + * Writes the variable-length part of the global order bounds into the fragment + * metadata. 
+ *
+ * The sequence of calls is
+ * 1) set_tile_global_order_bounds_fixed
+ * 2) convert_tile_global_order_bounds_sizes_to_offsets
+ * 3) this function
+ */
 void FragmentMetadata::set_tile_global_order_bounds_var(
     const std::string& dim_name, uint64_t tile, const WriterTileTuple& data) {
   const auto dim = array_schema_->domain().get_dimension_index(dim_name);
@@ -403,33 +412,27 @@ void FragmentMetadata::set_tile_global_order_bounds_var(
   const auto& tile_max = data.global_order_max();
   iassert(tile_max.has_value());
 
-  const uint64_t* min_sizes = reinterpret_cast(
+  const uint64_t* min_offsets = reinterpret_cast(
       loaded_metadata_ptr_->tile_global_order_min_buffer()[dim].data());
-  const uint64_t* max_sizes = reinterpret_cast(
+  const uint64_t* max_offsets = reinterpret_cast(
       loaded_metadata_ptr_->tile_global_order_max_buffer()[dim].data());
-  const uint64_t* data_offsets = data.offset_tile().data_as();
-  const uint64_t min_var_start = data_offsets[0];
-  const uint64_t min_var_size = min_sizes[0];
-  const uint64_t max_var_start = data_offsets[data.cell_num() - 1];
-  const uint64_t max_var_size = max_sizes[data.cell_num() - 1];
-
-  iassert(tile_min.value().size() == min_var_size);
-  iassert(tile_max.value().size() == max_var_size);
+  const uint64_t min_var_start = min_offsets[tile];
+  const uint64_t max_var_start = max_offsets[tile];
 
-  if (min_var_size) {
+  if (tile_min.value().size()) {
     memcpy(
         &loaded_metadata_ptr_
             ->tile_global_order_min_var_buffer()[tile][min_var_start],
         tile_min.value().data(),
-        min_var_size);
+        tile_min.value().size());
   }
-  if (max_var_size) {
+  if (tile_max.value().size()) {
     memcpy(
         &loaded_metadata_ptr_
             ->tile_global_order_max_var_buffer()[tile][max_var_start],
         tile_max.value().data(),
-        max_var_size);
+        tile_max.value().size());
   }
 }
 
diff --git a/tiledb/sm/tile/tile_metadata_generator.cc b/tiledb/sm/tile/tile_metadata_generator.cc
index f2acb4a3440..4df38a68fe6 100644
--- a/tiledb/sm/tile/tile_metadata_generator.cc
+++ b/tiledb/sm/tile/tile_metadata_generator.cc
@@ -570,16 +570,30 @@ void TileMetadataGenerator::process_cell_range_var(
   const auto& offset_tile = tile.offset_tile();
   const auto& var_tile = tile.var_tile();
 
-  // Handle empty tile.
-  if (!has_min_max_ || offset_tile.size() == 0) {
-    return;
-  }
-
   // Get pointers to the data and cell num.
   auto offset_value = offset_tile.data_as() + start;
   auto var_data = var_tile.data_as();
   auto cell_num = tile.cell_num();
 
+  if (is_dim_) {
+    iassert(end > start);
+    global_order_min_ = var_tile.data();
+    global_order_min_size_ =
+        (start == cell_num - 1 ? (var_tile.size() - offset_value[0]) :
+                                 (offset_value[1] - offset_value[0]));
+
+    const uint64_t imax = end - start - 1;
+    global_order_max_ = var_tile.data_u8() + offset_value[imax];
+    global_order_max_size_ =
+        (end == cell_num ? (var_tile.size() - offset_value[imax]) :
+                           (offset_value[imax + 1] - offset_value[imax]));
+  }
+
+  // Handle empty tile.
+  if (!has_min_max_ || offset_tile.size() == 0) {
+    return;
+  }
+
   // Var size attribute, non nullable. 
if (!tile.nullable()) { if (min_ == nullptr) { From 49cd23e1ae754fb4778e1f0914791dc930d5b3ba Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 16 Sep 2025 14:51:23 -0400 Subject: [PATCH 23/53] Minimum write GENERATE --- test/src/unit-fragment-info-global-order-bounds.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 49d1343d7ff..0219bc43e07 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -819,7 +819,8 @@ TEST_CASE( using Fragment = Fragment1DVar; SECTION("Minimum write") { - const std::vector value = {'f', 'o', 'o'}; + std::string svalue = GENERATE("foo", "", "long-ish string"); + const std::vector value(svalue.begin(), svalue.end()); Fragment f; f.dimension().push_back(value); From 5dbee5019da7531a0dc4ad58c0235bfa32729156 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 16 Sep 2025 23:00:39 -0400 Subject: [PATCH 24/53] Fix global_cell_cmp_std_tuple for variable-length dimension --- test/support/src/array_templates.h | 46 ++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/test/support/src/array_templates.h b/test/support/src/array_templates.h index cdbec503f69..be7f1a84614 100644 --- a/test/support/src/array_templates.h +++ b/test/support/src/array_templates.h @@ -69,26 +69,42 @@ struct global_cell_cmp_std_tuple { : tup_(tup) { } + private: + template + static constexpr tiledb::common::UntypedDatumView static_coord_datum( + const T& field) { + static_assert( + stdx::is_fundamental || + std::is_same_v> || + std::is_same_v>); + if constexpr (stdx::is_fundamental) { + return UntypedDatumView(&field, sizeof(T)); + } else { + return UntypedDatumView(field.data(), field.size()); + } + } + + template + static tiledb::common::UntypedDatumView try_dimension_datum( + const StdTuple& tup, unsigned dim) { + if (dim == I) { + return static_coord_datum(std::get(tup)); + } else if constexpr (I + 1 < std::tuple_size_v) { + return try_dimension_datum(tup, dim); + } else { + // NB: probably not reachable in practice + throw std::logic_error("Out of bounds access to dimension tuple"); + } + } + + public: tiledb::common::UntypedDatumView dimension_datum( const tiledb::sm::Dimension&, unsigned dim_idx) const { - return std::apply( - [&](const auto&... field) { - size_t sizes[] = {sizeof(std::decay_t)...}; - const void* const ptrs[] = { - static_cast(std::addressof(field))...}; - return UntypedDatumView(ptrs[dim_idx], sizes[dim_idx]); - }, - tup_); + return try_dimension_datum<0>(tup_, dim_idx); } const void* coord(unsigned dim) const { - return std::apply( - [&](const auto&... 
field) { - const void* const ptrs[] = { - static_cast(std::addressof(field))...}; - return ptrs[dim]; - }, - tup_); + return try_dimension_datum<0>(tup_, dim).content(); } StdTuple tup_; From 23c397f02161d8c8c203060ee75bd76fd6ef292e Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 16 Sep 2025 23:00:55 -0400 Subject: [PATCH 25/53] Fix set_tile_global_order_bounds_var indexing --- tiledb/sm/fragment/fragment_metadata.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tiledb/sm/fragment/fragment_metadata.cc b/tiledb/sm/fragment/fragment_metadata.cc index 161dc5846a8..7b1b1c131ec 100644 --- a/tiledb/sm/fragment/fragment_metadata.cc +++ b/tiledb/sm/fragment/fragment_metadata.cc @@ -423,14 +423,14 @@ void FragmentMetadata::set_tile_global_order_bounds_var( if (tile_min.value().size()) { memcpy( &loaded_metadata_ptr_ - ->tile_global_order_min_var_buffer()[tile][min_var_start], + ->tile_global_order_min_var_buffer()[dim][min_var_start], tile_min.value().data(), tile_min.value().size()); } if (tile_max.value().size()) { memcpy( &loaded_metadata_ptr_ - ->tile_global_order_max_var_buffer()[tile][max_var_start], + ->tile_global_order_max_var_buffer()[dim][max_var_start], tile_max.value().data(), tile_max.value().size()); } From 0e25643f26e0de65b13ba110227f6638102f09ab Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 16 Sep 2025 23:11:50 -0400 Subject: [PATCH 26/53] Nontrival 1D var test --- .../unit-fragment-info-global-order-bounds.cc | 107 ++++++++++++++++-- 1 file changed, 98 insertions(+), 9 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 0219bc43e07..aeaa9ad557f 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -802,19 +802,20 @@ TEST_CASE( const templates::Dimension dimension; + Context ctx = vfs_test_setup.ctx(); + templates::ddl::create_array( array_uri, - vfs_test_setup.ctx(), + ctx, std::tuple&>{ dimension}, std::vector>{}, TILEDB_ROW_MAJOR, TILEDB_ROW_MAJOR, - 8, + 32, allow_dups); - DeleteArrayGuard delarray( - vfs_test_setup.ctx().ptr().get(), array_uri.c_str()); + DeleteArrayGuard delarray(ctx.ptr().get(), array_uri.c_str()); using Fragment = Fragment1DVar; @@ -828,15 +829,12 @@ TEST_CASE( std::vector>> fragment_bounds; SECTION("Global Order") { fragment_bounds = instance( - vfs_test_setup.ctx(), array_uri, std::vector{f}); + ctx, array_uri, std::vector{f}); } SECTION("Unordered") { fragment_bounds = instance( - vfs_test_setup.ctx(), - array_uri, - std::vector{f}, - sm::Layout::UNORDERED); + ctx, array_uri, std::vector{f}, sm::Layout::UNORDERED); } REQUIRE(fragment_bounds.size() == 1); @@ -845,4 +843,95 @@ TEST_CASE( std::vector>{ std::make_pair(std::make_tuple(value), std::make_tuple(value))}); } + + SECTION("Single fragment") { + const std::vector words = { + "foo", "bar", "baz", "quux", "corge", "grault", "gub"}; + + Fragment f; + for (const auto& s1 : words) { + for (const auto& s2 : words) { + for (const auto& s3 : words) { + std::vector coord(s1.begin(), s1.end()); + coord.insert(coord.end(), s2.begin(), s2.end()); + coord.insert(coord.end(), s3.begin(), s3.end()); + + f.dimension().push_back(coord); + } + } + } + + const sm::Layout layout = + GENERATE(sm::Layout::UNORDERED, sm::Layout::GLOBAL_ORDER); + + if (layout == sm::Layout::GLOBAL_ORDER) { + Array forread(ctx, array_uri, TILEDB_READ); + f = make_global_order(forread, f, tiledb::sm::Layout::UNORDERED); + } + + DYNAMIC_SECTION( + 
"allow_dups = " + std::to_string(allow_dups) + + ", layout = " + sm::layout_str(layout)) { + const auto fragment_bounds = + instance( + ctx, array_uri, std::vector{f}, layout); + REQUIRE(fragment_bounds.size() == 1); + CHECK(fragment_bounds[0].size() == 11); + + auto lbstr = [](const Bounds& bound) { + const auto& value = std::get<0>(std::get<0>(bound)); + return std::string(value.begin(), value.end()); + }; + auto ubstr = [](const Bounds& bound) { + const auto& value = std::get<0>(std::get<1>(bound)); + return std::string(value.begin(), value.end()); + }; + + CHECK(fragment_bounds[0].size() == 11); + if (fragment_bounds[0].size() > 0) { + CHECK(lbstr(fragment_bounds[0][0]) == "barbarbar"); + CHECK(ubstr(fragment_bounds[0][0]) == "bargraultfoo"); + } + if (fragment_bounds[0].size() > 1) { + CHECK(lbstr(fragment_bounds[0][1]) == "bargraultgrault"); + CHECK(ubstr(fragment_bounds[0][1]) == "bazcorgebar"); + } + if (fragment_bounds[0].size() > 2) { + CHECK(lbstr(fragment_bounds[0][2]) == "bazcorgebaz"); + CHECK(ubstr(fragment_bounds[0][2]) == "bazquuxgrault"); + } + if (fragment_bounds[0].size() > 3) { + CHECK(lbstr(fragment_bounds[0][3]) == "bazquuxgub"); + CHECK(ubstr(fragment_bounds[0][3]) == "corgegraultbaz"); + } + if (fragment_bounds[0].size() > 4) { + CHECK(lbstr(fragment_bounds[0][4]) == "corgegraultcorge"); + CHECK(ubstr(fragment_bounds[0][4]) == "foobazgub"); + } + if (fragment_bounds[0].size() > 5) { + CHECK(lbstr(fragment_bounds[0][5]) == "foobazquux"); + CHECK(ubstr(fragment_bounds[0][5]) == "fooquuxcorge"); + } + if (fragment_bounds[0].size() > 6) { + CHECK(lbstr(fragment_bounds[0][6]) == "fooquuxfoo"); + CHECK(ubstr(fragment_bounds[0][6]) == "graultfooquux"); + } + if (fragment_bounds[0].size() > 7) { + CHECK(lbstr(fragment_bounds[0][7]) == "graultgraultbar"); + CHECK(ubstr(fragment_bounds[0][7]) == "gubbazfoo"); + } + if (fragment_bounds[0].size() > 8) { + CHECK(lbstr(fragment_bounds[0][8]) == "gubbazgrault"); + CHECK(ubstr(fragment_bounds[0][8]) == "gubquuxbar"); + } + if (fragment_bounds[0].size() > 9) { + CHECK(lbstr(fragment_bounds[0][9]) == "gubquuxbaz"); + CHECK(ubstr(fragment_bounds[0][9]) == "quuxfoograult"); + } + if (fragment_bounds[0].size() > 10) { + CHECK(lbstr(fragment_bounds[0][10]) == "quuxfoogub"); + CHECK(ubstr(fragment_bounds[0][10]) == "quuxquuxquux"); + } + } + } } From b7ab7f17532397d68819a87af1420592c046592b Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Wed, 17 Sep 2025 22:53:52 -0400 Subject: [PATCH 27/53] Fragment::extend --- test/support/src/array_templates.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/support/src/array_templates.h b/test/support/src/array_templates.h index be7f1a84614..5e5a6e7984c 100644 --- a/test/support/src/array_templates.h +++ b/test/support/src/array_templates.h @@ -1195,6 +1195,8 @@ struct Fragment { decltype(f_qb_const_ref(std::declval())); public: + using self_type = Fragment; + using DimensionBuffers = value_tuple_query_buffers; using DimensionBuffersRef = ref_tuple_query_buffers; using DimensionBuffersConstRef = @@ -1264,6 +1266,16 @@ struct Fragment { }, std::tuple_cat(dimensions(), attributes())); } + + void extend(const self_type& other) { + std::apply( + [&](Ts&... dst) { + std::apply( + [&](const Us&... 
src) { (dst.extend(src), ...); }, + std::tuple_cat(other.dimensions(), other.attributes())); + }, + std::tuple_cat(dimensions(), attributes())); + } }; /** From a0dedb91e552c96c3a2d8caa651818cdfe776ce5 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Wed, 17 Sep 2025 22:54:19 -0400 Subject: [PATCH 28/53] Fix prepare_bound_buffers for empty qb --- test/src/unit-fragment-info-global-order-bounds.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index aeaa9ad557f..4e31f63b273 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -151,7 +151,11 @@ static void prepare_bound_buffers( ptrs[i] = qbuf.values_.data(); } } else { - ptrs[i] = static_cast(&qbuf[0]); + if (qbuf.num_cells() == 0) { + ptrs[i] = nullptr; + } else { + ptrs[i] = static_cast(&qbuf[0]); + } } }; From eed749a97744da91256a167ccfae2866ec1b84ea Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Wed, 17 Sep 2025 22:55:35 -0400 Subject: [PATCH 29/53] Fragment metadata global order bounds: 1D fixed consolidation non-overlapping single-tile fragment section --- .../unit-fragment-info-global-order-bounds.cc | 342 +++++++++++++++--- 1 file changed, 300 insertions(+), 42 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 4e31f63b273..ea426c7dde6 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -249,6 +249,34 @@ static Bounds global_order_bounds( return std::make_pair(tuple_index(lb, 0), tuple_index(ub, 0)); } +/** + * @return the global order bounds of all tiles in all fragments; [f][t] + * contains the bounds of the t'th tile of fragment f + */ +template +std::vector>> get_all_bounds( + const Context& ctx, const std::string& array_uri) { + FragmentInfo finfo(ctx, array_uri); + finfo.load(); + + std::vector>> bounds; + bounds.reserve(finfo.fragment_num()); + + for (uint64_t f = 0; f < finfo.fragment_num(); f++) { + const uint64_t num_tiles = finfo.mbr_num(f); + + std::vector> this_fragment_bounds; + this_fragment_bounds.reserve(num_tiles); + for (uint64_t t = 0; t < num_tiles; t++) { + this_fragment_bounds.push_back(global_order_bounds(finfo, f, t)); + } + + bounds.push_back(this_fragment_bounds); + } + + return bounds; +} + /** * Asserts that when a set of fragments are written, the fragment metadata * accurately reflects the expected global order bounds of the input. 
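// Editorial sketch (not part of the patch): consuming the [f][t] structure that
// get_all_bounds above returns. Fragment1DFixed, ctx, and array_uri are names
// this test file already uses inside its test cases.
const auto bounds = get_all_bounds<Fragment1DFixed>(ctx, array_uri);
for (uint64_t f = 0; f < bounds.size(); f++) {       // one entry per fragment
  for (uint64_t t = 0; t < bounds[f].size(); t++) {  // one entry per tile (MBR)
    const auto& [lower, upper] = bounds[f][t];  // global-order min/max coordinate tuples of tile t
  }
}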
@@ -261,7 +289,7 @@ static Bounds global_order_bounds( * @return the global order bounds for each tile per fragment */ template -std::vector>> instance( +std::vector>> assert_written_bounds( const Context& ctx, const std::string& array_uri, const std::vector& fragments, @@ -276,20 +304,19 @@ std::vector>> instance( } // check bounds - std::vector, CoordsTuple>>> bounds; - Array forread(ctx, array_uri, TILEDB_READ); + std::vector>> bounds = + get_all_bounds(ctx, array_uri); + Array forread(ctx, array_uri, TILEDB_READ); const uint64_t tile_stride = forread.schema().capacity(); - FragmentInfo finfo(ctx, array_uri); - finfo.load(); - + ASSERTER(bounds.size() == fragments.size()); for (size_t f = 0; f < fragments.size(); f++) { const auto fragment = make_global_order(forread, fragments[f], layout); - std::decay_t fragment_bounds; + const uint64_t num_tiles = bounds[f].size(); + ASSERTER(num_tiles == (fragment.size() + tile_stride - 1) / tile_stride); - const uint64_t num_tiles = finfo.mbr_num(f); for (size_t t = 0; t < num_tiles; t++) { const uint64_t lbi = t * tile_stride; const uint64_t ubi = std::min((t + 1) * tile_stride, fragment.size()) - 1; @@ -297,14 +324,10 @@ std::vector>> instance( const auto lbexpect = tuple_index(fragment.dimensions(), lbi); const auto ubexpect = tuple_index(fragment.dimensions(), ubi); - const auto [lbactual, ubactual] = global_order_bounds(finfo, f, t); + const auto [lbactual, ubactual] = bounds[f][t]; ASSERTER(lbexpect == lbactual); ASSERTER(ubexpect == ubactual); - - fragment_bounds.push_back(std::make_pair(lbactual, ubactual)); } - - bounds.push_back(fragment_bounds); } return bounds; @@ -355,16 +378,18 @@ TEST_CASE( std::vector>> fragment_bounds; SECTION("Global Order") { - fragment_bounds = instance( - vfs_test_setup.ctx(), array_uri, std::vector{f}); + fragment_bounds = + assert_written_bounds( + vfs_test_setup.ctx(), array_uri, std::vector{f}); } SECTION("Unordered") { - fragment_bounds = instance( - vfs_test_setup.ctx(), - array_uri, - std::vector{f}, - sm::Layout::UNORDERED); + fragment_bounds = + assert_written_bounds( + vfs_test_setup.ctx(), + array_uri, + std::vector{f}, + sm::Layout::UNORDERED); } REQUIRE(fragment_bounds.size() == 1); CHECK( @@ -390,7 +415,7 @@ TEST_CASE( SECTION("Global Order") { std::iota(f.dimension().begin(), f.dimension().end(), 1); const auto fragment_bounds = - instance( + assert_written_bounds( vfs_test_setup.ctx(), array_uri, std::vector{f}); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect); @@ -402,7 +427,7 @@ TEST_CASE( } const auto fragment_bounds = - instance( + assert_written_bounds( vfs_test_setup.ctx(), array_uri, std::vector{f}, @@ -421,7 +446,7 @@ TEST_CASE( f.dimension() = {0, 0, 0, 0, 0, 0, 0, 0, 1}; const auto fragment_bounds = - instance( + assert_written_bounds( vfs_test_setup.ctx(), array_uri, std::vector{f}, @@ -435,7 +460,7 @@ TEST_CASE( f.dimension() = {0, 0, 0, 1, 0, 0, 0, 0, 0}; const auto fragment_bounds = - instance( + assert_written_bounds( vfs_test_setup.ctx(), array_uri, std::vector{f}, @@ -486,7 +511,7 @@ TEST_CASE( forread, fragment, sm::Layout::UNORDERED)); } - instance( + assert_written_bounds( vfs_test_setup.ctx(), array_uri, global_order_fragments, @@ -500,7 +525,7 @@ TEST_CASE( auto arrayguard = temp_array(allow_dups); Array forread(ctx, array_uri, TILEDB_READ); - instance( + assert_written_bounds( vfs_test_setup.ctx(), array_uri, fragments, sm::Layout::UNORDERED); }); } @@ -662,13 +687,15 @@ TEST_CASE( std::vector>> fragment_bounds; SECTION("Global Order") { - 
fragment_bounds = instance( - ctx, array_uri, std::vector{f}); + fragment_bounds = + assert_written_bounds( + ctx, array_uri, std::vector{f}); } SECTION("Unordered") { - fragment_bounds = instance( - ctx, array_uri, std::vector{f}, sm::Layout::UNORDERED); + fragment_bounds = + assert_written_bounds( + ctx, array_uri, std::vector{f}, sm::Layout::UNORDERED); } std::vector expect_bounds = {{{0, 0}, {0, 0}}}; @@ -678,7 +705,7 @@ TEST_CASE( DYNAMIC_SECTION("Row (layout = " + sm::layout_str(layout) + ")") { const auto fragment_bounds = - instance( + assert_written_bounds( ctx, array_uri, std::vector{row}, layout); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect_row_bounds); @@ -686,7 +713,7 @@ TEST_CASE( DYNAMIC_SECTION("Column (layout = " + sm::layout_str(layout) + ")") { const auto fragment_bounds = - instance( + assert_written_bounds( ctx, array_uri, std::vector{col}, layout); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect_col_bounds); @@ -694,7 +721,7 @@ TEST_CASE( DYNAMIC_SECTION("Square (layout = " + sm::layout_str(layout) + ")") { const auto fragment_bounds = - instance( + assert_written_bounds( ctx, array_uri, std::vector{square}, layout); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect_square_bounds); @@ -702,7 +729,7 @@ TEST_CASE( DYNAMIC_SECTION("Square offset (layout = " + sm::layout_str(layout) + ")") { const auto fragment_bounds = - instance( + assert_written_bounds( ctx, array_uri, std::vector{square_offset}, layout); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0] == expect_square_offset_bounds); @@ -711,7 +738,7 @@ TEST_CASE( DYNAMIC_SECTION("Multi-fragment (layout = " + sm::layout_str(layout) + ")") { const std::vector fragments = {col, square_offset, row, square}; const auto fragment_bounds = - instance( + assert_written_bounds( ctx, array_uri, fragments, layout); CHECK(fragment_bounds.size() >= 1); if (fragment_bounds.size() >= 1) { @@ -776,7 +803,7 @@ TEST_CASE( forread, fragment, sm::Layout::UNORDERED)); } - instance( + assert_written_bounds( vfs_test_setup.ctx(), array_uri, global_order_fragments, @@ -790,7 +817,7 @@ TEST_CASE( auto arrayguard = temp_array(allow_dups); Array forread(ctx, array_uri, TILEDB_READ); - instance( + assert_written_bounds( vfs_test_setup.ctx(), array_uri, fragments, sm::Layout::UNORDERED); }); } @@ -832,13 +859,15 @@ TEST_CASE( std::vector>> fragment_bounds; SECTION("Global Order") { - fragment_bounds = instance( - ctx, array_uri, std::vector{f}); + fragment_bounds = + assert_written_bounds( + ctx, array_uri, std::vector{f}); } SECTION("Unordered") { - fragment_bounds = instance( - ctx, array_uri, std::vector{f}, sm::Layout::UNORDERED); + fragment_bounds = + assert_written_bounds( + ctx, array_uri, std::vector{f}, sm::Layout::UNORDERED); } REQUIRE(fragment_bounds.size() == 1); @@ -877,7 +906,7 @@ TEST_CASE( "allow_dups = " + std::to_string(allow_dups) + ", layout = " + sm::layout_str(layout)) { const auto fragment_bounds = - instance( + assert_written_bounds( ctx, array_uri, std::vector{f}, layout); REQUIRE(fragment_bounds.size() == 1); CHECK(fragment_bounds[0].size() == 11); @@ -939,3 +968,232 @@ TEST_CASE( } } } + +template +std::vector>> consolidate_n_wise( + const Context& ctx, const std::string& uri, uint64_t fan_in) { + // step 0: consolidation config + // NB: this ideally would not be needed but in debug builds + // a huge amount of memory is allocated and initialized which is very slow + Config cfg; + cfg["sm.mem.total_budget"] = std::to_string(128 * 
1024 * 1024); + + // step 1: n-wise consolidate + std::vector s_fragment_uris; + { + FragmentInfo fi(ctx, uri); + fi.load(); + + for (uint32_t f = 0; f < fi.fragment_num(); f++) { + s_fragment_uris.push_back(fi.fragment_uri(f)); + } + } + for (uint32_t f = 0; f < s_fragment_uris.size(); f += fan_in) { + std::vector fragment_uris; + for (uint32_t ff = f; ff < std::min(f + fan_in, s_fragment_uris.size()); + ff++) { + fragment_uris.push_back(s_fragment_uris[ff].c_str()); + } + + Array::consolidate( + ctx, uri, fragment_uris.data(), fragment_uris.size(), &cfg); + } + + // step 2: retrieve bounds of new fragments + return get_all_bounds(ctx, uri); +} + +template +struct ConsolidateOutput { + std::vector fragment_data_; + std::vector>> bounds_; +}; + +template +ConsolidateOutput assert_consolidate_n_wise_bounds( + const Context& ctx, + const std::string& array_uri, + const std::vector& input_fragment_data, + uint64_t fan_in) { + const auto actual_bounds = consolidate_n_wise(ctx, array_uri, fan_in); + + Array forread(ctx, array_uri, TILEDB_READ); + const uint64_t tile_stride = forread.schema().capacity(); + + std::vector output_fragments; + for (uint64_t f = 0; f < input_fragment_data.size(); f += fan_in) { + F output_fragment; + for (uint64_t ff = f; ff < std::min(f + fan_in, input_fragment_data.size()); + ff++) { + output_fragment.extend(input_fragment_data[ff]); + } + + output_fragments.push_back( + make_global_order(forread, output_fragment, sm::Layout::UNORDERED)); + } + + ASSERTER(output_fragments.size() == actual_bounds.size()); + for (uint64_t f = 0; f < output_fragments.size(); f++) { + const uint64_t num_tiles = actual_bounds[f].size(); + ASSERTER( + num_tiles == + (output_fragments[f].size() + tile_stride - 1) / tile_stride); + + for (size_t t = 0; t < num_tiles; t++) { + const uint64_t lbi = t * tile_stride; + const uint64_t ubi = + std::min((t + 1) * tile_stride, output_fragments[f].size()) - 1; + + const auto lbexpect = tuple_index(output_fragments[f].dimensions(), lbi); + const auto ubexpect = tuple_index(output_fragments[f].dimensions(), ubi); + + const auto [lbactual, ubactual] = actual_bounds[f][t]; + ASSERTER(lbexpect == lbactual); + ASSERTER(ubexpect == ubactual); + } + } + + return ConsolidateOutput{ + .fragment_data_ = output_fragments, .bounds_ = actual_bounds}; +} + +TEST_CASE( + "Fragment metadata global order bounds: 1D fixed consolidation", + "[fragment_info][global-order]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_1d_fixed_consolidation"); + + const templates::Dimension dimension( + templates::Domain(0, 1024 * 8), 16); + + Context ctx = vfs_test_setup.ctx(); + + templates::ddl::create_array( + array_uri, + ctx, + std::tuple&>{dimension}, + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + false); + + DeleteArrayGuard delarray(ctx.ptr().get(), array_uri.c_str()); + + using Fragment = templates::Fragment1D; + + SECTION("Single tile") { + std::vector fs; + for (uint64_t f = 0; f < 8; f++) { + Fragment input; + input.resize(8); + std::iota(input.dimension().begin(), input.dimension().end(), 1 + f * 8); + fs.push_back(input); + } + + const auto fragment_bounds = + assert_written_bounds( + vfs_test_setup.ctx(), array_uri, fs); + REQUIRE(fragment_bounds.size() == fs.size()); + + SECTION("Pairs") { + const auto pairwise = assert_consolidate_n_wise_bounds< + tiledb::test::AsserterCatch, + Fragment>(ctx, array_uri, fs, 2); + CHECK(pairwise.bounds_.size() == fs.size() / 2); + + // 
each new fragment should have two tiles each, + // and since they are ascending they should just be a concatenation + CHECK( + pairwise.bounds_[0] == + std::vector>{ + fragment_bounds[0][0], fragment_bounds[1][0]}); + CHECK( + pairwise.bounds_[1] == + std::vector>{ + fragment_bounds[2][0], fragment_bounds[3][0]}); + CHECK( + pairwise.bounds_[2] == + std::vector>{ + fragment_bounds[4][0], fragment_bounds[5][0]}); + CHECK( + pairwise.bounds_[3] == + std::vector>{ + fragment_bounds[6][0], fragment_bounds[7][0]}); + + // run another round, now each should have four tiles + const auto quadwise = assert_consolidate_n_wise_bounds< + tiledb::test::AsserterCatch, + Fragment>(ctx, array_uri, pairwise.fragment_data_, 2); + CHECK(quadwise.bounds_.size() == 2); + CHECK( + quadwise.bounds_[0] == std::vector>{ + fragment_bounds[0][0], + fragment_bounds[1][0], + fragment_bounds[2][0], + fragment_bounds[3][0]}); + CHECK( + quadwise.bounds_[1] == std::vector>{ + fragment_bounds[4][0], + fragment_bounds[5][0], + fragment_bounds[6][0], + fragment_bounds[7][0]}); + + // run final round + const auto octwise = assert_consolidate_n_wise_bounds< + tiledb::test::AsserterCatch, + Fragment>(ctx, array_uri, quadwise.fragment_data_, 2); + CHECK(octwise.bounds_.size() == 1); + CHECK( + octwise.bounds_[0] == std::vector>{ + fragment_bounds[0][0], + fragment_bounds[1][0], + fragment_bounds[2][0], + fragment_bounds[3][0], + fragment_bounds[4][0], + fragment_bounds[5][0], + fragment_bounds[6][0], + fragment_bounds[7][0]}); + } + + SECTION("Triples") { + const auto triwise = assert_consolidate_n_wise_bounds< + tiledb::test::AsserterCatch, + Fragment>(ctx, array_uri, fs, 3); + CHECK(triwise.bounds_.size() == 3); + + // see notes above + CHECK( + triwise.bounds_[0] == std::vector>{ + fragment_bounds[0][0], + fragment_bounds[1][0], + fragment_bounds[2][0]}); + CHECK( + triwise.bounds_[1] == std::vector>{ + fragment_bounds[3][0], + fragment_bounds[4][0], + fragment_bounds[5][0]}); + CHECK( + triwise.bounds_[2] == + std::vector>{ + fragment_bounds[6][0], fragment_bounds[7][0]}); + + const auto ninewise = assert_consolidate_n_wise_bounds< + tiledb::test::AsserterCatch, + Fragment>(ctx, array_uri, triwise.fragment_data_, 3); + + CHECK(ninewise.bounds_.size() == 1); + CHECK( + ninewise.bounds_[0] == std::vector>{ + fragment_bounds[0][0], + fragment_bounds[1][0], + fragment_bounds[2][0], + fragment_bounds[3][0], + fragment_bounds[4][0], + fragment_bounds[5][0], + fragment_bounds[6][0], + fragment_bounds[7][0]}); + } + } +} From 38b981b9774ce0edb525fc533945e2c188bc7d0c Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Thu, 18 Sep 2025 11:00:15 -0400 Subject: [PATCH 30/53] Add interleaving 1d fixed consolidation test --- .../unit-fragment-info-global-order-bounds.cc | 71 ++++++++++++++++++- 1 file changed, 70 insertions(+), 1 deletion(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index ea426c7dde6..1f58332c222 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -1083,7 +1083,7 @@ TEST_CASE( using Fragment = templates::Fragment1D; - SECTION("Single tile") { + SECTION("Non-overlapping") { std::vector fs; for (uint64_t f = 0; f < 8; f++) { Fragment input; @@ -1196,4 +1196,73 @@ TEST_CASE( fragment_bounds[7][0]}); } } + + auto tile = [](uint64_t lb, uint64_t ub) -> Bounds { + return std::make_pair(std::make_tuple(lb), std::make_tuple(ub)); + }; + + SECTION("Interleaving") { + std::vector fs; + for 
(uint64_t f = 0; f < 8; f++) { + Fragment input; + input.resize(8); + for (uint64_t c = 0; c < 8; c++) { + input.dimension()[c] = (8 * c + 1 + f); + } + fs.push_back(input); + } + + const auto fragment_bounds = + assert_written_bounds( + vfs_test_setup.ctx(), array_uri, fs); + REQUIRE(fragment_bounds.size() == fs.size()); + + CHECK( + fragment_bounds == std::vector>>{ + {tile(1, 57)}, + {tile(2, 58)}, + {tile(3, 59)}, + {tile(4, 60)}, + {tile(5, 61)}, + {tile(6, 62)}, + {tile(7, 63)}, + {tile(8, 64)}}); + + SECTION("Pairs") { + const auto pairwise = + assert_consolidate_n_wise_bounds( + ctx, array_uri, fs, 2); + + CHECK( + pairwise.bounds_ == std::vector>>{ + {tile(1, 26), tile(33, 58)}, + {tile(3, 28), tile(35, 60)}, + {tile(5, 30), tile(37, 62)}, + {tile(7, 32), tile(39, 64)}}); + + const auto quadwise = + assert_consolidate_n_wise_bounds( + ctx, array_uri, pairwise.fragment_data_, 2); + CHECK( + quadwise.bounds_ == + std::vector>>{ + {tile(1, 12), tile(17, 28), tile(33, 44), tile(49, 60)}, + {tile(5, 16), tile(21, 32), tile(37, 48), tile(53, 64)}, + }); + + const auto octwise = + assert_consolidate_n_wise_bounds( + ctx, array_uri, quadwise.fragment_data_, 2); + CHECK( + octwise.bounds_ == std::vector>>{ + {tile(1, 8), + tile(9, 16), + tile(17, 24), + tile(25, 32), + tile(33, 40), + tile(41, 48), + tile(49, 56), + tile(57, 64)}}); + } + } } From f1f64a5528c037702eb08485351438709810723c Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Thu, 18 Sep 2025 21:56:46 -0400 Subject: [PATCH 31/53] Additional consolidation tests, 1d fixed rapidcheck and 1d var --- .../unit-fragment-info-global-order-bounds.cc | 278 ++++++++++++++++++ 1 file changed, 278 insertions(+) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 1f58332c222..b50ec46b319 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -195,6 +195,12 @@ static void allocate_var_bound_buffers( template using Bounds = std::pair, CoordsTuple>; +template +using FragmentBounds = std::vector>; + +template +using ArrayBounds = std::vector>; + /** * @return the lower and upper bounds of tile `(f, t)` in the fragment info */ @@ -1264,5 +1270,277 @@ TEST_CASE( tile(49, 56), tile(57, 64)}}); } + + SECTION("Triples") { + const auto triwise = assert_consolidate_n_wise_bounds< + tiledb::test::AsserterCatch, + Fragment>(ctx, array_uri, fs, 3); + + CHECK( + triwise.bounds_ == ArrayBounds{ + {tile(1, 18), tile(19, 41), tile(42, 58)}, + {tile(4, 21), tile(22, 44), tile(45, 61)}, + {tile(7, 32), tile(39, 64)}}); + + const auto ninewise = assert_consolidate_n_wise_bounds< + tiledb::test::AsserterCatch, + Fragment>(ctx, array_uri, triwise.fragment_data_, 3); + + CHECK( + ninewise.bounds_ == std::vector>>{ + {tile(1, 8), + tile(9, 16), + tile(17, 24), + tile(25, 32), + tile(33, 40), + tile(41, 48), + tile(49, 56), + tile(57, 64)}}); + } + } +} + +template +void rapidcheck_instance_consolidation( + const Context& ctx, + const std::string& array_uri, + uint64_t fan_in, + const std::vector& input) { + Array forread(ctx, array_uri, TILEDB_READ); + std::vector global_order_fragments; + for (const auto& fragment : input) { + global_order_fragments.push_back( + make_global_order(forread, fragment, sm::Layout::UNORDERED)); + } + + ConsolidateOutput state; + state.fragment_data_ = global_order_fragments; + state.bounds_ = assert_written_bounds( + ctx, array_uri, global_order_fragments, sm::Layout::GLOBAL_ORDER); + + while 
(state.bounds_.size() > 1) { + state = + assert_consolidate_n_wise_bounds( + ctx, array_uri, state.fragment_data_, fan_in); + } +} + +TEST_CASE( + "Fragment metadata global order bounds: 1D var consolidation", + "[fragment_info][global-order]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_1d_var_consolidation"); + + const bool allow_dups = GENERATE(true, false); + + const templates::Dimension dimension; + + Context ctx = vfs_test_setup.ctx(); + + templates::ddl::create_array( + array_uri, + ctx, + std::tuple&>{ + dimension}, + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + allow_dups); + + DeleteArrayGuard delarray(ctx.ptr().get(), array_uri.c_str()); + + using Fragment = Fragment1DVar; + + const uint64_t num_cells_per_fragment = 16; + std::vector input; + for (uint64_t f = 0; f < 8; f++) { + Fragment fdata; + for (uint64_t c = 0; c < num_cells_per_fragment; c++) { + const std::string value = std::to_string(c + f * num_cells_per_fragment); + fdata.dimension().push_back(std::span( + reinterpret_cast(value.data()), value.size())); + } + input.push_back(fdata); + } + + auto tile = [](std::string_view lb, std::string_view ub) -> Bounds { + return std::make_pair( + std::make_tuple(std::vector(lb.begin(), lb.end())), + std::make_tuple(std::vector(ub.begin(), ub.end()))); + }; + + const auto fragment_bounds = + assert_written_bounds( + ctx, array_uri, input, sm::Layout::UNORDERED); + + REQUIRE( + fragment_bounds == ArrayBounds{ + {tile("0", "15"), tile("2", "9")}, + {tile("16", "23"), tile("24", "31")}, + {tile("32", "39"), tile("40", "47")}, + {tile("48", "55"), tile("56", "63")}, + {tile("64", "71"), tile("72", "79")}, + {tile("80", "87"), tile("88", "95")}, + {tile("100", "107"), tile("108", "99")}, + {tile("112", "119"), tile("120", "127")}}); + + SECTION("Pairs") { + const auto pairwise = + assert_consolidate_n_wise_bounds( + ctx, array_uri, input, 2); + + CHECK( + pairwise.bounds_ == ArrayBounds{ + {tile("0", "15"), + tile("16", "22"), + tile("23", "3"), + tile("30", "9")}, + {tile("32", "39"), + tile("40", "47"), + tile("48", "55"), + tile("56", "63")}, + {tile("64", "71"), + tile("72", "79"), + tile("80", "87"), + tile("88", "95")}, + {tile("100", "107"), + tile("108", "115"), + tile("116", "123"), + tile("124", "99")}}); + + const auto quadwise = + assert_consolidate_n_wise_bounds( + ctx, array_uri, pairwise.fragment_data_, 2); + + CHECK( + quadwise.bounds_ == ArrayBounds{ + {tile("0", "15"), + tile("16", "22"), + tile("23", "3"), + tile("30", "37"), + tile("38", "44"), + tile("45", "51"), + tile("52", "59"), + tile("6", "9")}, + {tile("100", "107"), + tile("108", "115"), + tile("116", "123"), + tile("124", "67"), + tile("68", "75"), + tile("76", "83"), + tile("84", "91"), + tile("92", "99")}}); + + const auto octwise = + assert_consolidate_n_wise_bounds( + ctx, array_uri, quadwise.fragment_data_, 2); + + CHECK( + octwise.bounds_ == ArrayBounds{ + {tile("0", "104"), + tile("105", "111"), + tile("112", "119"), + tile("12", "126"), + tile("127", "19"), + tile("2", "26"), + tile("27", "33"), + tile("34", "40"), + tile("41", "48"), + tile("49", "55"), + tile("56", "62"), + tile("63", "7"), + tile("70", "77"), + tile("78", "84"), + tile("85", "91"), + tile("92", "99")}}); } + + SECTION("Triples") { + const auto triwise = + assert_consolidate_n_wise_bounds( + ctx, array_uri, input, 3); + + CHECK( + triwise.bounds_ == ArrayBounds{ + {tile("0", "15"), + tile("16", "22"), + tile("23", "3"), + 
tile("30", "37"), + tile("38", "44"), + tile("45", "9")}, + {tile("48", "55"), + tile("56", "63"), + tile("64", "71"), + tile("72", "79"), + tile("80", "87"), + tile("88", "95")}, + {tile("100", "107"), + tile("108", "115"), + tile("116", "123"), + tile("124", "99")}}); + + const auto ninewise = + assert_consolidate_n_wise_bounds( + ctx, array_uri, triwise.fragment_data_, 3); + + CHECK( + ninewise.bounds_ == ArrayBounds{ + {tile("0", "104"), + tile("105", "111"), + tile("112", "119"), + tile("12", "126"), + tile("127", "19"), + tile("2", "26"), + tile("27", "33"), + tile("34", "40"), + tile("41", "48"), + tile("49", "55"), + tile("56", "62"), + tile("63", "7"), + tile("70", "77"), + tile("78", "84"), + tile("85", "91"), + tile("92", "99")}}); + } +} + +TEST_CASE( + "Fragment metadata global order bounds: 1D fixed consolidation rapidcheck", + "[fragment_info][global-order][rapidcheck]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_1d_fixed_consolidation"); + + const templates::Dimension dimension( + templates::Domain(0, 1024 * 8), 16); + + Context ctx = vfs_test_setup.ctx(); + + auto temp_array = [&](bool allow_dups) { + templates::ddl::create_array( + array_uri, + ctx, + std::tuple&>{dimension}, + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + allow_dups); + + return DeleteArrayGuard(ctx.ptr().get(), array_uri.c_str()); + }; + + rc::prop("1D fixed consolidation", [&](bool allow_dups) { + uint64_t fan_in = *rc::gen::inRange(2, 8); + auto fragments = *rc::gen::suchThat( + rc::gen::container>( + rc::make_fragment_1d(allow_dups, dimension.domain)), + [](auto value) { return value.size() > 1; }); + + auto arrayguard = temp_array(allow_dups); + rapidcheck_instance_consolidation( + ctx, array_uri, fan_in, fragments); + }); } From d7cc66023d573457f55ecef0a8d7862b2551659c Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Fri, 19 Sep 2025 11:22:53 -0400 Subject: [PATCH 32/53] Domain::cell_order_cmp overload consistency --- tiledb/sm/array_schema/domain.cc | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/tiledb/sm/array_schema/domain.cc b/tiledb/sm/array_schema/domain.cc index 9c7b39ed390..aa157119697 100644 --- a/tiledb/sm/array_schema/domain.cc +++ b/tiledb/sm/array_schema/domain.cc @@ -273,10 +273,9 @@ int Domain::cell_order_cmp( const type::DomainDataRef& left, const type::DomainDataRef& right) const { if (cell_order_ == Layout::ROW_MAJOR || cell_order_ == Layout::HILBERT) { for (unsigned d = 0; d < dim_num_; ++d) { - auto res = cell_order_cmp_func_[d]( - dimension_ptr(d), - left.dimension_datum_view(d), - right.dimension_datum_view(d)); + const auto ldatum = left.dimension_datum_view(d); + const auto rdatum = right.dimension_datum_view(d); + auto res = cell_order_cmp(d, ldatum, rdatum); if (res == 1 || res == -1) return res; @@ -284,10 +283,9 @@ int Domain::cell_order_cmp( } } else { // COL_MAJOR for (unsigned d = dim_num_ - 1;; --d) { - auto res = cell_order_cmp_func_[d]( - dimension_ptr(d), - left.dimension_datum_view(d), - right.dimension_datum_view(d)); + const auto ldatum = left.dimension_datum_view(d); + const auto rdatum = right.dimension_datum_view(d); + auto res = cell_order_cmp(d, ldatum, rdatum); if (res == 1 || res == -1) return res; From 8805da0252830b757ce8b593f7d0596ea746ca82 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Fri, 19 Sep 2025 11:24:36 -0400 Subject: [PATCH 33/53] Add 1D var rapidcheck test, does not pass --- 
.../unit-fragment-info-global-order-bounds.cc | 86 +++++++++++++++++-- test/support/rapidcheck/array_templates.h | 20 ++++- test/support/src/array_templates.h | 22 +++-- 3 files changed, 112 insertions(+), 16 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index b50ec46b319..54b046620a2 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -45,7 +45,8 @@ using namespace tiledb::test; using Fragment1DFixed = templates::Fragment1D; using Fragment2DFixed = templates::Fragment2D; -using Fragment1DVar = templates::Fragment1D>; +using Fragment1DVar = + templates::Fragment1D; void showValue(const Fragment1DFixed& value, std::ostream& os) { rc::showFragment(value, os); @@ -144,7 +145,7 @@ static void prepare_bound_buffers( DimensionTuple& bufs, std::array& ptrs) { auto prepare_bound_buffer = [&](uint64_t i, templates::query_buffers& qbuf) { - if constexpr (std::is_same_v>) { + if constexpr (std::is_same_v) { if (qbuf.values_.empty()) { ptrs[i] = nullptr; } else { @@ -176,7 +177,7 @@ static void allocate_var_bound_buffers( DimensionTuple& bufs, size_t sizes[]) { auto allocate_var_bound_buffer = [&](uint64_t i, templates::query_buffers& qbuf) { - if constexpr (std::is_same_v>) { + if constexpr (std::is_same_v) { qbuf.values_.resize(sizes[i]); qbuf.offsets_ = {0}; } else { @@ -858,7 +859,8 @@ TEST_CASE( SECTION("Minimum write") { std::string svalue = GENERATE("foo", "", "long-ish string"); - const std::vector value(svalue.begin(), svalue.end()); + const templates::StringDimensionCoordType value( + svalue.begin(), svalue.end()); Fragment f; f.dimension().push_back(value); @@ -891,7 +893,7 @@ TEST_CASE( for (const auto& s1 : words) { for (const auto& s2 : words) { for (const auto& s3 : words) { - std::vector coord(s1.begin(), s1.end()); + templates::StringDimensionCoordType coord(s1.begin(), s1.end()); coord.insert(coord.end(), s2.begin(), s2.end()); coord.insert(coord.end(), s3.begin(), s3.end()); @@ -973,6 +975,70 @@ TEST_CASE( } } } + + SECTION("Shrinking") { + Fragment f; + f.dimension().push_back(templates::StringDimensionCoordType{'o', 'a'}); + f.dimension().push_back(templates::StringDimensionCoordType{'o', '\324'}); + + { + Array forread(ctx, array_uri, TILEDB_READ); + f = make_global_order(forread, f, sm::Layout::UNORDERED); + } + assert_written_bounds( + ctx, array_uri, std::vector{f}, sm::Layout::UNORDERED); + } +} + +TEST_CASE( + "Fragment metadata global order bounds: 1D var rapidcheck", + "[fragment_info][global-order][rapidcheck]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_1d_var_rapidcheck"); + + const templates::StringDimensionCoordType LB = {'a'}; + const templates::StringDimensionCoordType UB = {'z'}; + const templates::Domain domain(LB, UB); + const templates::Dimension dimension(domain); + + Context ctx = vfs_test_setup.ctx(); + + auto temp_array = [&](bool allow_dups) { + templates::ddl::create_array( + array_uri, + ctx, + std::tuple&>{ + dimension}, + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + allow_dups); + + return DeleteArrayGuard(ctx.ptr().get(), array_uri.c_str()); + }; + + using F = Fragment1DVar; + + rc::prop("1D var rapidcheck", [&](bool allow_dups) { + auto fragments = *rc::gen::container>( + rc::make_fragment_1d( + allow_dups, domain)); + auto arrayguard = temp_array(allow_dups); + Array forread(ctx, array_uri, 
TILEDB_READ); + std::vector global_order_fragments; + for (const auto& fragment : fragments) { + global_order_fragments.push_back( + make_global_order(forread, fragment, sm::Layout::UNORDERED)); + } + + assert_written_bounds( + vfs_test_setup.ctx(), + array_uri, + global_order_fragments, + sm::Layout::GLOBAL_ORDER); + }); } template @@ -1359,16 +1425,18 @@ TEST_CASE( Fragment fdata; for (uint64_t c = 0; c < num_cells_per_fragment; c++) { const std::string value = std::to_string(c + f * num_cells_per_fragment); - fdata.dimension().push_back(std::span( - reinterpret_cast(value.data()), value.size())); + fdata.dimension().push_back( + templates::StringDimensionCoordView(value.data(), value.size())); } input.push_back(fdata); } auto tile = [](std::string_view lb, std::string_view ub) -> Bounds { return std::make_pair( - std::make_tuple(std::vector(lb.begin(), lb.end())), - std::make_tuple(std::vector(ub.begin(), ub.end()))); + std::make_tuple( + templates::StringDimensionCoordType(lb.begin(), lb.end())), + std::make_tuple( + templates::StringDimensionCoordType(ub.begin(), ub.end()))); }; const auto fragment_bounds = diff --git a/test/support/rapidcheck/array_templates.h b/test/support/rapidcheck/array_templates.h index beec67e8cd2..909488c5d12 100644 --- a/test/support/rapidcheck/array_templates.h +++ b/test/support/rapidcheck/array_templates.h @@ -139,7 +139,19 @@ Gen make_coordinate(const templates::Domain& domain) { // whereas the domain upper bound is inclusive. // As a result some contortion is required to deal // with numeric_limits. - if (std::is_signed::value) { + if constexpr (std::is_same_v) { + // NB: poor performance with small domains for sure + return gen::suchThat( + gen::map( + gen::string(), + [](std::string s) { + StringDimensionCoordType v(s.begin(), s.end()); + return v; + }), + [domain](const StringDimensionCoordType& s) { + return domain.lower_bound <= s && s <= domain.upper_bound; + }); + } else if constexpr (std::is_signed::value) { if (int64_t(domain.upper_bound) < std::numeric_limits::max()) { return gen::cast(gen::inRange( int64_t(domain.lower_bound), int64_t(domain.upper_bound + 1))); @@ -185,7 +197,11 @@ Gen> make_fragment_1d( std::apply( [&](std::vector tup_d1, auto... tup_atts) { - coords.values_ = tup_d1; + if constexpr (std::is_same_v) { + coords = query_buffers(tup_d1); + } else { + coords.values_ = tup_d1; + } atts = std::apply( [&](std::vector... att) { return std::make_tuple(query_buffers(att)...); diff --git a/test/support/src/array_templates.h b/test/support/src/array_templates.h index 5e5a6e7984c..b082c2a6cd4 100644 --- a/test/support/src/array_templates.h +++ b/test/support/src/array_templates.h @@ -59,6 +59,9 @@ class Dimension; namespace tiledb::test::templates { +using StringDimensionCoordType = std::vector; +using StringDimensionCoordView = std::span; + /** * Adapts a `std::tuple` whose fields are all `GlobalCellCmp` * to itself be `GlobalCellCmp`. 
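// Editorial sketch (not part of the patch) of the recursive dispatch this
// struct's try_dimension_datum helper uses to turn a runtime dimension index
// into a compile-time std::get; the tuple contents and the visitor below are
// hypothetical, only the pattern mirrors the test helper.
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>

template <std::size_t I = 0, typename Visitor, typename... Ts>
void visit_field(const std::tuple<Ts...>& tup, std::size_t idx, Visitor&& visit) {
  if (idx == I) {
    visit(std::get<I>(tup));  // the field's static type is known in this branch
  } else if constexpr (I + 1 < sizeof...(Ts)) {
    visit_field<I + 1>(tup, idx, std::forward<Visitor>(visit));
  } else {
    throw std::logic_error("Out of bounds access to tuple");
  }
}

int main() {
  std::tuple<int, std::string> cell{42, "foo"};
  visit_field(cell, 1, [](const auto& f) { std::cout << f << '\n'; });  // prints "foo"
}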
@@ -75,8 +78,8 @@ struct global_cell_cmp_std_tuple { const T& field) { static_assert( stdx::is_fundamental || - std::is_same_v> || - std::is_same_v>); + std::is_same_v || + std::is_same_v); if constexpr (stdx::is_fundamental) { return UntypedDatumView(&field, sizeof(T)); } else { @@ -125,7 +128,7 @@ struct query_buffers {}; */ template concept DimensionType = - std::is_same_v> or requires(const D& coord) { + std::is_same_v or requires(const D& coord) { typename std::is_signed; { coord < coord } -> std::same_as; { D(int64_t(coord)) } -> std::same_as; @@ -227,7 +230,16 @@ struct Dimension { template <> struct Dimension { - using value_type = std::vector; + using value_type = StringDimensionCoordType; + + Dimension() { + } + + Dimension(const Domain& domain) + : domain(domain) { + } + + std::optional> domain; }; template @@ -1565,7 +1577,7 @@ void create_array( using CoordType = templates::Dimension::value_type; dimension_names.push_back("d" + std::to_string(dimension_names.size() + 1)); dimension_types.push_back(static_cast(D)); - if constexpr (std::is_same_v>) { + if constexpr (std::is_same_v) { dimension_ranges.push_back(nullptr); dimension_extents.push_back(nullptr); } else { From 9dad9a684313b47c895292e820b896cbd9f4262d Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Fri, 19 Sep 2025 14:07:55 -0400 Subject: [PATCH 34/53] Fix set_tile_global_order_bounds_var tile --- tiledb/sm/fragment/fragment_metadata.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tiledb/sm/fragment/fragment_metadata.cc b/tiledb/sm/fragment/fragment_metadata.cc index 7b1b1c131ec..eb15b92ca3e 100644 --- a/tiledb/sm/fragment/fragment_metadata.cc +++ b/tiledb/sm/fragment/fragment_metadata.cc @@ -400,12 +400,14 @@ void FragmentMetadata::set_tile_global_order_bounds_fixed( * 3) this */ void FragmentMetadata::set_tile_global_order_bounds_var( - const std::string& dim_name, uint64_t tile, const WriterTileTuple& data) { + const std::string& dim_name, uint64_t wtile, const WriterTileTuple& data) { const auto dim = array_schema_->domain().get_dimension_index(dim_name); if (!array_schema_->domain().dimensions()[dim]->var_size()) { return; } + const uint64_t tile = tile_index_base_ + wtile; + const auto& tile_min = data.global_order_min(); iassert(tile_min.has_value()); From 11d2601f954ba143a3e9897e9833bc07cda266ee Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Fri, 19 Sep 2025 14:09:01 -0400 Subject: [PATCH 35/53] Shrinking section --- .../unit-fragment-info-global-order-bounds.cc | 44 +++++++++++-------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 54b046620a2..0a7d604dfa6 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -975,19 +975,6 @@ TEST_CASE( } } } - - SECTION("Shrinking") { - Fragment f; - f.dimension().push_back(templates::StringDimensionCoordType{'o', 'a'}); - f.dimension().push_back(templates::StringDimensionCoordType{'o', '\324'}); - - { - Array forread(ctx, array_uri, TILEDB_READ); - f = make_global_order(forread, f, sm::Layout::UNORDERED); - } - assert_written_bounds( - ctx, array_uri, std::vector{f}, sm::Layout::UNORDERED); - } } TEST_CASE( @@ -1021,10 +1008,8 @@ TEST_CASE( using F = Fragment1DVar; - rc::prop("1D var rapidcheck", [&](bool allow_dups) { - auto fragments = *rc::gen::container>( - rc::make_fragment_1d( - allow_dups, domain)); + auto instance = [&]( + bool allow_dups, const 
std::vector& fragments) { auto arrayguard = temp_array(allow_dups); Array forread(ctx, array_uri, TILEDB_READ); std::vector global_order_fragments; @@ -1033,12 +1018,35 @@ TEST_CASE( make_global_order(forread, fragment, sm::Layout::UNORDERED)); } - assert_written_bounds( + assert_written_bounds( vfs_test_setup.ctx(), array_uri, global_order_fragments, sm::Layout::GLOBAL_ORDER); + }; + + rc::prop("1D var rapidcheck", [&](bool allow_dups) { + auto fragments = *rc::gen::container>( + rc::make_fragment_1d( + allow_dups, domain)); + + instance(allow_dups, fragments); }); + + SECTION("Shrinking") { + F f; + f.dimension().push_back(templates::StringDimensionCoordType{'a'}); + f.dimension().push_back(templates::StringDimensionCoordType{'b'}); + f.dimension().push_back(templates::StringDimensionCoordType{'c'}); + f.dimension().push_back(templates::StringDimensionCoordType{'w'}); + f.dimension().push_back(templates::StringDimensionCoordType{'n'}); + f.dimension().push_back(templates::StringDimensionCoordType{'a', 'a'}); + f.dimension().push_back(templates::StringDimensionCoordType{'d'}); + f.dimension().push_back(templates::StringDimensionCoordType{'g'}); + f.dimension().push_back(templates::StringDimensionCoordType{'v'}); + + instance.operator()(false, std::vector{f}); + } } template From 654c6797a5772716555fab9623c830e565d9e0cf Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Fri, 19 Sep 2025 14:09:12 -0400 Subject: [PATCH 36/53] rapidcheck show functions --- .../src/unit-fragment-info-global-order-bounds.cc | 12 ++++++++++++ test/support/rapidcheck/array_templates.h | 15 +++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 0a7d604dfa6..08d95e33561 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -52,11 +52,16 @@ void showValue(const Fragment1DFixed& value, std::ostream& os) { rc::showFragment(value, os); } +void showValue(const Fragment1DVar& value, std::ostream& os) { + rc::showFragment(value, os); +} + void showValue(const Fragment2DFixed& value, std::ostream& os) { rc::showFragment(value, os); } namespace rc::detail { + template struct ShowDefault { static void show(const Fragment1DFixed& value, std::ostream& os) { @@ -64,6 +69,13 @@ struct ShowDefault { } }; +template +struct ShowDefault { + static void show(const Fragment1DVar& value, std::ostream& os) { + rc::showFragment(value, os); + } +}; + } // namespace rc::detail /** diff --git a/test/support/rapidcheck/array_templates.h b/test/support/rapidcheck/array_templates.h index 909488c5d12..2f38834a4ad 100644 --- a/test/support/rapidcheck/array_templates.h +++ b/test/support/rapidcheck/array_templates.h @@ -266,8 +266,23 @@ struct ShowDefault, A, B> { } }; +template +struct ShowDefault>, A, B> { + static void show( + const query_buffers>& value, std::ostream& os) { + std::vector values; + for (uint64_t c = 0; c < value.num_cells(); c++) { + values.push_back(std::string(value[c].begin(), value[c].end())); + } + ::rc::show(values, os); + } +}; + } // namespace detail +/** + * Generic logic to for showing a `templates::FragmentType`. 
+ */ template void showFragment( const templates::Fragment& value, From af2dc5a20a6e7c69b0fa88224ecdf202a29eabd1 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Fri, 19 Sep 2025 14:50:25 -0400 Subject: [PATCH 37/53] 1D var consolidation rapidcheck --- .../unit-fragment-info-global-order-bounds.cc | 49 ++++++++++++++++--- test/support/rapidcheck/array_templates.h | 17 +++++-- 2 files changed, 57 insertions(+), 9 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 08d95e33561..70d8236e8ef 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -996,10 +996,7 @@ TEST_CASE( const auto array_uri = vfs_test_setup.array_uri( "fragment_metadata_global_order_bounds_1d_var_rapidcheck"); - const templates::StringDimensionCoordType LB = {'a'}; - const templates::StringDimensionCoordType UB = {'z'}; - const templates::Domain domain(LB, UB); - const templates::Dimension dimension(domain); + const templates::Dimension dimension; Context ctx = vfs_test_setup.ctx(); @@ -1039,8 +1036,7 @@ TEST_CASE( rc::prop("1D var rapidcheck", [&](bool allow_dups) { auto fragments = *rc::gen::container>( - rc::make_fragment_1d( - allow_dups, domain)); + rc::make_fragment_1d(allow_dups)); instance(allow_dups, fragments); }); @@ -1632,3 +1628,44 @@ TEST_CASE( ctx, array_uri, fan_in, fragments); }); } + +TEST_CASE( + "Fragment metadata global order bounds: 1D var consolidation rapidcheck", + "[fragment_info][global-order][rapidcheck]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_1d_fixed_consolidation"); + + const templates::Dimension dimension; + + Context ctx = vfs_test_setup.ctx(); + + auto temp_array = [&](bool allow_dups) { + templates::ddl::create_array( + array_uri, + ctx, + std::tuple&>{ + dimension}, + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + allow_dups); + + return DeleteArrayGuard(ctx.ptr().get(), array_uri.c_str()); + }; + + using F = Fragment1DVar; + + rc::prop("1D var consolidation", [&](bool allow_dups) { + uint64_t fan_in = *rc::gen::inRange(2, 8); + auto fragments = *rc::gen::suchThat( + rc::gen::container>( + rc::make_fragment_1d( + allow_dups)), + [](auto value) { return value.size() > 1; }); + + auto arrayguard = temp_array(allow_dups); + rapidcheck_instance_consolidation(ctx, array_uri, fan_in, fragments); + }); +} diff --git a/test/support/rapidcheck/array_templates.h b/test/support/rapidcheck/array_templates.h index 2f38834a4ad..e9c0123d71b 100644 --- a/test/support/rapidcheck/array_templates.h +++ b/test/support/rapidcheck/array_templates.h @@ -178,9 +178,7 @@ Gen> make_range(const templates::Domain& domain) { template Gen> make_fragment_1d( - bool allow_duplicates, const Domain& d) { - auto coord = make_coordinate(d); - + bool allow_duplicates, Gen coord) { auto cell = gen::tuple(coord, gen::arbitrary()...); using Cell = std::tuple; @@ -215,6 +213,19 @@ Gen> make_fragment_1d( }); } +template +Gen> make_fragment_1d( + bool allow_duplicates) { + return make_fragment_1d(allow_duplicates, gen::arbitrary()); +} + +template +Gen> make_fragment_1d( + bool allow_duplicates, const Domain& d) { + auto coord = make_coordinate(d); + return make_fragment_1d(allow_duplicates, coord); +} + template Gen> make_fragment_2d( bool allow_duplicates, From bb4ebaad3bea0ae59059753e362418e5902ee86b Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Mon, 22 Sep 2025 11:14:46 
-0400 Subject: [PATCH 38/53] make_fragment_3d --- test/support/rapidcheck/array_templates.h | 54 +++++++++++++++++++++++ test/support/src/array_templates.h | 31 +++++++++++++ 2 files changed, 85 insertions(+) diff --git a/test/support/rapidcheck/array_templates.h b/test/support/rapidcheck/array_templates.h index e9c0123d71b..4ad1afdc7c4 100644 --- a/test/support/rapidcheck/array_templates.h +++ b/test/support/rapidcheck/array_templates.h @@ -264,6 +264,60 @@ Gen> make_fragment_2d( }); } +template < + DimensionType D1, + DimensionType D2, + DimensionType D3, + AttributeType... Att> +Gen> make_fragment_3d( + bool allow_duplicates, + std::optional> d1, + std::optional> d2, + std::optional> d3) { + auto coord_d1 = + (d1.has_value() ? make_coordinate(d1.value()) : gen::arbitrary()); + auto coord_d2 = + (d2.has_value() ? make_coordinate(d2.value()) : gen::arbitrary()); + auto coord_d3 = + (d3.has_value() ? make_coordinate(d3.value()) : gen::arbitrary()); + + using Cell = std::tuple; + + auto cell = + gen::tuple(coord_d1, coord_d2, coord_d3, gen::arbitrary()...); + + auto uniqueCoords = [](const Cell& cell) { + return std::make_tuple( + std::get<0>(cell), std::get<1>(cell), std::get<2>(cell)); + }; + + auto cells = gen::nonEmpty( + allow_duplicates ? gen::container>(cell) : + gen::uniqueBy>(cell, uniqueCoords)); + + return gen::map(cells, [](std::vector cells) { + std::vector coords_d1; + std::vector coords_d2; + std::vector coords_d3; + std::tuple...> atts; + + std::apply( + [&](std::vector tup_d1, + std::vector tup_d2, + std::vector tup_d3, + auto... tup_atts) { + coords_d1 = tup_d1; + coords_d2 = tup_d2; + coords_d3 = tup_d3; + atts = std::make_tuple(tup_atts...); + }, + stdx::transpose(cells)); + + return Fragment3D{ + std::make_tuple(coords_d1, coords_d2, coords_d3), atts}; + }); +} + void showValue(const templates::Domain& domain, std::ostream& os); void showValue(const templates::Domain& domain, std::ostream& os); void showValue(const templates::Domain& domain, std::ostream& os); diff --git a/test/support/src/array_templates.h b/test/support/src/array_templates.h index b082c2a6cd4..d821268038f 100644 --- a/test/support/src/array_templates.h +++ b/test/support/src/array_templates.h @@ -1328,6 +1328,37 @@ struct Fragment2D : public Fragment, std::tuple> { } }; +/** + * Data for a three-dimensional array + */ +template +struct Fragment3D + : public Fragment, std::tuple> { + const query_buffers& d1() const { + return std::get<0>(this->dimensions()); + } + + const query_buffers& d2() const { + return std::get<1>(this->dimensions()); + } + + const query_buffers& d3() const { + return std::get<2>(this->dimensions()); + } + + query_buffers& d1() { + return std::get<0>(this->dimensions()); + } + + query_buffers& d2() { + return std::get<1>(this->dimensions()); + } + + query_buffers& d3() { + return std::get<2>(this->dimensions()); + } +}; + /** * Binds variadic field data to a tiledb query */ From 6cb0b1d44bbe11882a9d4a4e0a1fe8f98807cf39 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Mon, 22 Sep 2025 11:15:09 -0400 Subject: [PATCH 39/53] 3D vcf rapidcheck --- .../unit-fragment-info-global-order-bounds.cc | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 70d8236e8ef..ad475adb0fc 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -48,6 +48,11 @@ using Fragment2DFixed = templates::Fragment2D; using 
Fragment1DVar = templates::Fragment1D; +using FragmentVcf2025 = templates::Fragment3D< + templates::StringDimensionCoordType, + uint32_t, + templates::StringDimensionCoordType>; + void showValue(const Fragment1DFixed& value, std::ostream& os) { rc::showFragment(value, os); } @@ -1057,6 +1062,73 @@ TEST_CASE( } } +/** + * Rapidcheck bounds test using the VCF 2025 data model + * (3D sparse array with chromosome/position/sample dimensions) + */ +TEST_CASE( + "Fragment metadata global order bounds: 3D vcf rapidcheck", + "[fragment_info][global-order][rapidcheck]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_3d_vcf_rapidcheck"); + + const templates::Domain domain_sample(0, 10000); + + const templates::Dimension d_chromosome; + const templates::Dimension d_position(domain_sample, 32); + const templates::Dimension d_sample; + + Context ctx = vfs_test_setup.ctx(); + + auto temp_array = [&](bool allow_dups) { + templates::ddl::create_array< + Datatype::STRING_ASCII, + Datatype::UINT32, + Datatype::STRING_ASCII>( + array_uri, + ctx, + std::tie(d_chromosome, d_position, d_sample), + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + allow_dups); + + return DeleteArrayGuard(ctx.ptr().get(), array_uri.c_str()); + }; + + using F = FragmentVcf2025; + + auto instance = [&]( + bool allow_dups, const std::vector& fragments) { + auto arrayguard = temp_array(allow_dups); + Array forread(ctx, array_uri, TILEDB_READ); + std::vector global_order_fragments; + for (const auto& fragment : fragments) { + global_order_fragments.push_back( + make_global_order(forread, fragment, sm::Layout::UNORDERED)); + } + + assert_written_bounds( + vfs_test_setup.ctx(), + array_uri, + global_order_fragments, + sm::Layout::GLOBAL_ORDER); + }; + + rc::prop("3D vcf2025 rapidcheck", [&](bool allow_dups) { + auto fragments = *rc::gen::container>( + rc::make_fragment_3d< + templates::StringDimensionCoordType, + uint32_t, + templates::StringDimensionCoordType>( + allow_dups, std::nullopt, domain_sample, std::nullopt)); + + instance(allow_dups, fragments); + }); +} + template std::vector>> consolidate_n_wise( const Context& ctx, const std::string& uri, uint64_t fan_in) { From 44469f8cb35b6728992ae069e6996c50f72417ad Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 10:04:57 -0400 Subject: [PATCH 40/53] Add C++ API and use it in tests --- .../unit-fragment-info-global-order-bounds.cc | 124 ++++-------------- tiledb/sm/cpp_api/fragment_info.h | 92 +++++++++++++ 2 files changed, 119 insertions(+), 97 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index ad475adb0fc..4831a18b3a4 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -153,63 +153,6 @@ template using CoordsTuple = decltype(tuple_index( std::declval().dimensions(), std::declval())); -/** - * Stores pointers to the fields of `bufs` in `ptrs` for use - * with the fragment bound functions. 
- */ -template -static void prepare_bound_buffers( - DimensionTuple& bufs, std::array& ptrs) { - auto prepare_bound_buffer = - [&](uint64_t i, templates::query_buffers& qbuf) { - if constexpr (std::is_same_v) { - if (qbuf.values_.empty()) { - ptrs[i] = nullptr; - } else { - ptrs[i] = qbuf.values_.data(); - } - } else { - if (qbuf.num_cells() == 0) { - ptrs[i] = nullptr; - } else { - ptrs[i] = static_cast(&qbuf[0]); - } - } - }; - - uint64_t i = 0; - std::apply( - [&](templates::query_buffers&... qbufs) { - (prepare_bound_buffer(i++, qbufs), ...); - }, - bufs); -} - -/** - * Reserves space in the variable-length dimensions of `bufs` - * for global order bounds of the provided `sizes`. - */ -template -static void allocate_var_bound_buffers( - DimensionTuple& bufs, size_t sizes[]) { - auto allocate_var_bound_buffer = - [&](uint64_t i, templates::query_buffers& qbuf) { - if constexpr (std::is_same_v) { - qbuf.values_.resize(sizes[i]); - qbuf.offsets_ = {0}; - } else { - qbuf.resize(1); - } - }; - - uint64_t i = 0; - std::apply( - [&](templates::query_buffers&... qbufs) { - (allocate_var_bound_buffer(i++, qbufs), ...); - }, - bufs); -} - template using Bounds = std::pair, CoordsTuple>; @@ -225,50 +168,37 @@ using ArrayBounds = std::vector>; template static Bounds global_order_bounds( const FragmentInfo& finfo, uint64_t fragment, uint64_t tile) { - constexpr size_t num_fields = std::tuple_size>::value; - - // FIXME: there needs to be another API to ask about maximum variable-length. - // Otherwise it is unsafe to call this API with variable-length dimensions - DimensionTuple lb, ub; - size_t lb_sizes[num_fields]; - std::array lb_dimensions; - - size_t ub_sizes[num_fields]; - std::array ub_dimensions; - - auto ctx_c = finfo.context().ptr().get(); - - auto call = [&]() { - prepare_bound_buffers(lb, lb_dimensions); - prepare_bound_buffers(ub, ub_dimensions); - - // FIXME: add C++ API - auto rc = tiledb_fragment_info_get_global_order_lower_bound( - ctx_c, - finfo.ptr().get(), - fragment, - tile, - &lb_sizes[0], - &lb_dimensions[0]); - throw_if_error(ctx_c, rc); - - rc = tiledb_fragment_info_get_global_order_upper_bound( - ctx_c, - finfo.ptr().get(), - fragment, - tile, - &ub_sizes[0], - &ub_dimensions[0]); - throw_if_error(ctx_c, rc); + auto bound_vec_to_tuple = [](DimensionTuple& out, + std::vector> bounds) { + auto handle_field = [&]( + templates::query_buffers& qb, uint64_t d) { + static_assert( + stdx::is_fundamental || + std::is_same_v); + if constexpr (stdx::is_fundamental) { + qb.resize(1); + qb[0] = *reinterpret_cast(bounds[d].data()); + } else { + qb.values_.resize(bounds[d].size()); + memcpy(qb.values_.data(), bounds[d].data(), bounds[d].size()); + qb.offsets_ = {0}; + } + }; + uint64_t d = 0; + std::apply( + [&](templates::query_buffers&... 
qb) { + (handle_field(qb, d++), ...); + }, + out); }; - // determine length, then allocate, then call again - call(); - allocate_var_bound_buffers(lb, lb_sizes); - allocate_var_bound_buffers(ub, ub_sizes); - call(); + auto lbvec = finfo.global_order_lower_bound(fragment, tile); + bound_vec_to_tuple(lb, lbvec); + + auto ubvec = finfo.global_order_upper_bound(fragment, tile); + bound_vec_to_tuple(ub, ubvec); return std::make_pair(tuple_index(lb, 0), tuple_index(ub, 0)); } diff --git a/tiledb/sm/cpp_api/fragment_info.h b/tiledb/sm/cpp_api/fragment_info.h index a398c68ffaa..d18670d3d7f 100644 --- a/tiledb/sm/cpp_api/fragment_info.h +++ b/tiledb/sm/cpp_api/fragment_info.h @@ -279,6 +279,98 @@ class FragmentInfo { return std::make_pair(start, end); } + /** + * Returns the minimum coordinate in global order for a particular bounding + * rectangle in a fragment. The returned value contains one element per + * dimension. + * + * @throws if the fragment index `fid` is invalid; if the bounding rectangle + * index `mid` is invalid; if the fragment is not a sparse fragment; or if the + * fragment was written in a format version which does not contain the + * bounding rectangle global order bounds. + */ + std::vector> global_order_lower_bound( + uint32_t fid, uint32_t mid) const { + std::vector dimension_sizes; + std::vector dimension_ptrs; + dimension_sizes.resize(array_schema(fid).domain().ndim()); + dimension_ptrs.resize(dimension_sizes.size()); + + // 1. get sizes + ctx_.get().handle_error(tiledb_fragment_info_get_global_order_lower_bound( + ctx_.get().ptr().get(), + fragment_info_.get(), + fid, + mid, + dimension_sizes.data(), + dimension_ptrs.data())); + + // 2. get data + std::vector> dimension_bufs; + dimension_bufs.resize(dimension_sizes.size()); + + for (uint64_t d = 0; d < dimension_sizes.size(); d++) { + dimension_bufs[d].resize(dimension_sizes[d]); + dimension_ptrs[d] = dimension_bufs[d].data(); + } + + ctx_.get().handle_error(tiledb_fragment_info_get_global_order_lower_bound( + ctx_.get().ptr().get(), + fragment_info_.get(), + fid, + mid, + dimension_sizes.data(), + dimension_ptrs.data())); + + return dimension_bufs; + } + + /** + * Returns the maximum coordinate in global order for a particular bounding + * rectangle in a fragment. The returned value contains one element per + * dimension. + * + * @throws if the fragment index `fid` is invalid; if the bounding rectangle + * index `mid` is invalid; if the fragment is not a sparse fragment; or if the + * fragment was written in a format version which does not contain the + * bounding rectangle global order bounds. + */ + std::vector> global_order_upper_bound( + uint32_t fid, uint32_t mid) const { + std::vector dimension_sizes; + std::vector dimension_ptrs; + dimension_sizes.resize(array_schema(fid).domain().ndim()); + dimension_ptrs.resize(dimension_sizes.size()); + + // 1. get sizes + ctx_.get().handle_error(tiledb_fragment_info_get_global_order_upper_bound( + ctx_.get().ptr().get(), + fragment_info_.get(), + fid, + mid, + dimension_sizes.data(), + dimension_ptrs.data())); + + // 2. 
get data + std::vector> dimension_bufs; + dimension_bufs.resize(dimension_sizes.size()); + + for (uint64_t d = 0; d < dimension_sizes.size(); d++) { + dimension_bufs[d].resize(dimension_sizes[d]); + dimension_ptrs[d] = dimension_bufs[d].data(); + } + + ctx_.get().handle_error(tiledb_fragment_info_get_global_order_upper_bound( + ctx_.get().ptr().get(), + fragment_info_.get(), + fid, + mid, + dimension_sizes.data(), + dimension_ptrs.data())); + + return dimension_bufs; + } + /** Returns the number of fragments. */ uint32_t fragment_num() const { auto& ctx = ctx_.get(); From d176eb67089acc59c8a3ec8754914db3dac69cf6 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 10:07:49 -0400 Subject: [PATCH 41/53] Add 3D vcf consolidation rapidcheck test --- .../unit-fragment-info-global-order-bounds.cc | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 4831a18b3a4..3a677dd054f 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -1671,3 +1671,56 @@ TEST_CASE( rapidcheck_instance_consolidation(ctx, array_uri, fan_in, fragments); }); } + +/** + * Rapidcheck bounds consolidation test using the VCF 2025 data model + * (3D sparse array with chromosome/position/sample dimensions) + */ +TEST_CASE( + "Fragment metadata global order bounds: 3D vcf consolidation rapidcheck", + "[fragment_info][global-order][rapidcheck]") { + VFSTestSetup vfs_test_setup; + const auto array_uri = vfs_test_setup.array_uri( + "fragment_metadata_global_order_bounds_3d_vcf_consolidation"); + + const templates::Domain domain_sample(0, 10000); + + const templates::Dimension d_chromosome; + const templates::Dimension d_position(domain_sample, 32); + const templates::Dimension d_sample; + + Context ctx = vfs_test_setup.ctx(); + + auto temp_array = [&](bool allow_dups) { + templates::ddl::create_array< + Datatype::STRING_ASCII, + Datatype::UINT32, + Datatype::STRING_ASCII>( + array_uri, + ctx, + std::tie(d_chromosome, d_position, d_sample), + std::vector>{}, + TILEDB_ROW_MAJOR, + TILEDB_ROW_MAJOR, + 8, + allow_dups); + + return DeleteArrayGuard(ctx.ptr().get(), array_uri.c_str()); + }; + + using F = FragmentVcf2025; + + rc::prop("3D vcf2025 consolidation", [&](bool allow_dups) { + uint64_t fan_in = *rc::gen::inRange(2, 8); + auto fragments = *rc::gen::suchThat( + rc::gen::container>(rc::make_fragment_3d< + templates::StringDimensionCoordType, + uint32_t, + templates::StringDimensionCoordType>( + allow_dups, std::nullopt, domain_sample, std::nullopt)), + [](auto value) { return value.size() > 1; }); + + auto arrayguard = temp_array(allow_dups); + rapidcheck_instance_consolidation(ctx, array_uri, fan_in, fragments); + }); +} From e76b0ce0e1f3a4de733dbabb74683def9655467a Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 10:26:03 -0400 Subject: [PATCH 42/53] Update format spec --- format_spec/fragment.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/format_spec/fragment.md b/format_spec/fragment.md index 78dd065b0a4..110388a1962 100644 --- a/format_spec/fragment.md +++ b/format_spec/fragment.md @@ -82,6 +82,12 @@ The fragment metadata file has the following on-disk format: | Tile maxes for attribute/dimension 1 | [Tile Mins/Maxes](#tile-mins-maxes) | _New in version 11_ The serialized maxes for attribute/dimension 1 | | … | … | … | | Variable maxes for attribute/dimension N | [Tile 
Mins/Maxes](#tile-mins-maxes) | _New in version 11_ The serialized maxes for attribute/dimension N | +| Tile global order min coordinates for dimension 1 | [Tile Mins/Maxes](#tile-mins-maxes) | _New in version 23_ For sparse arrays, the serialized value of dimension 1 of the global order minimum coordinate in each tile. | +| … | … | … | +| Variable global order min coordinates for dimension N | [Tile Mins/Maxes](#tile-mins-maxes) | _New in version 23_ For sparse arrays, the serialized value of dimension N of the global order minimum coordinate in each tile. | +| Tile global order max coordinates for dimension 1 | [Tile Mins/Maxes](#tile-mins-maxes) | _New in version 23_ For sparse arrays, the serialized value of dimension 1 of the global order maximum coordinate in each tile. | +| … | … | … | +| Variable global order max coordinates for dimension N | [Tile Mins/Maxes](#tile-mins-maxes) | _New in version 23_ For sparse arrays, the serialized value of dimension N of the global order maximum coordinate in each tile. | | Tile sums for attribute/dimension 1 | [Tile Sums](#tile-sums) | _New in version 11_ The serialized sums for attribute/dimension 1 | | … | … | … | | Variable sums for attribute/dimension N | [Tile Sums](#tile-sums) | _New in version 11_ The serialized sums for attribute/dimension N | @@ -276,6 +282,12 @@ The footer is a simple blob \(i.e., _not a generic tile_\) with the following in | Tile maxes offset for attribute/dimension 1 | `uint64_t` | The offset to the generic tile storing the tile maxes for attribute/dimension 1. | | … | … | … | | Tile maxes offset for attribute/dimension N | `uint64_t` | The offset to the generic tile storing the tile maxes for attribute/dimension N | +| Tile global order min coordinates offset for dimension 1 | `uint64_t` | _New in version 23_ For sparse arrays, he offset to the generic tile storing the tile global order mins for dimension 1 +| … | … | … | +| Tile global order min coordinates offset for dimension N | `uint64_t` | _New in version 23_ For sparse arrays, the offset to the generic tile storing the tile global order mins for dimension N +| Tile global order max coordinates offset for dimension 1 | `uint64_t` | _New in version 23_ For sparse arrays, the offset to the generic tile storing the tile global order maxes for dimension 1 +| … | … | … | +| Tile global order max coordinates offset for dimension N | `uint64_t` | _New in version 23_ For sparse arrays, the offset to the generic tile storing the tile global order maxes for dimension N | Tile sums offset for attribute/dimension 1 | `uint64_t` | The offset to the generic tile storing the tile sums for attribute/dimension 1. 
| | … | … | … | | Tile sums offset for attribute/dimension N | `uint64_t` | The offset to the generic tile storing the tile sums for attribute/dimension N | From 029592c29e3e42fa12819b7a8fce061bec99950e Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 10:35:53 -0400 Subject: [PATCH 43/53] Only write global order min/maxes for sparse array --- tiledb/sm/fragment/fragment_metadata.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tiledb/sm/fragment/fragment_metadata.cc b/tiledb/sm/fragment/fragment_metadata.cc index eb15b92ca3e..903cfb6e919 100644 --- a/tiledb/sm/fragment/fragment_metadata.cc +++ b/tiledb/sm/fragment/fragment_metadata.cc @@ -1286,7 +1286,7 @@ void FragmentMetadata::store_v15_or_higher( offset += nbytes; } - if (!array_schema_->dense() && + if (!dense_ && version_ >= constants::fragment_metadata_global_order_bounds_version) { const auto num_dims = array_schema_->dim_num(); // Store global order mins @@ -2542,7 +2542,8 @@ void FragmentMetadata::load_generic_tile_offsets_v16_or_higher( gt_offsets_.tile_max_offsets_.resize(num); deserializer.read(>_offsets_.tile_max_offsets_[0], num * sizeof(uint64_t)); - if (version_ >= constants::fragment_metadata_global_order_bounds_version) { + if (!dense_ && + version_ >= constants::fragment_metadata_global_order_bounds_version) { // Load offsets for the tile global order bounds const auto num_dims = array_schema_->dim_num(); gt_offsets_.tile_global_order_min_offsets_.resize(num_dims); @@ -2796,7 +2797,8 @@ void FragmentMetadata::write_generic_tile_offsets( serializer.write(>_offsets_.tile_max_offsets_[0], num * sizeof(uint64_t)); } - if (version_ >= constants::fragment_metadata_global_order_bounds_version) { + if (!dense_ && + version_ >= constants::fragment_metadata_global_order_bounds_version) { // Write the tile global order bound offsets const auto num_dims = array_schema_->dim_num(); serializer.write( From 36d624cc8426eba65e778e4f5b93813e443d0e5c Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 11:06:25 -0400 Subject: [PATCH 44/53] Allow dimension_sizes arg to be nullptr --- tiledb/sm/fragment/fragment_info.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tiledb/sm/fragment/fragment_info.cc b/tiledb/sm/fragment/fragment_info.cc index 84681b9aec8..733c6115ad5 100644 --- a/tiledb/sm/fragment/fragment_info.cc +++ b/tiledb/sm/fragment/fragment_info.cc @@ -762,7 +762,9 @@ static Status read_global_order_bound_to_user_buffers( "Cannot get MBR global order bound: Invalid mbr index"); } - dimension_sizes[d] = dimFixedSize; + if (dimension_sizes) { + dimension_sizes[d] = dimFixedSize; + } if (dimensions[d]) { const void* coord = &fixedPart[d].data()[which_tile * dimFixedSize]; From 369e02a100a365d49aacdef840dcfc49b6a355ff Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 11:06:44 -0400 Subject: [PATCH 45/53] Recapture output for 'dump with string dimension' test --- test/src/unit-capi-fragment_info.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/src/unit-capi-fragment_info.cc b/test/src/unit-capi-fragment_info.cc index 96213bb6177..887a4174006 100644 --- a/test/src/unit-capi-fragment_info.cc +++ b/test/src/unit-capi-fragment_info.cc @@ -1965,7 +1965,7 @@ TEST_CASE( "- Unconsolidated metadata num: 1\n" + "- To vacuum num: 0\n" + "- Fragment #1:\n" + " > URI: " + written_frag_uri + "\n" + " > Schema name: " + schema_name + "\n" + " > Type: sparse\n" + - " > Non-empty domain: [a, ddd]\n" + " > Size: 3439\n" + + " > 
Non-empty domain: [a, ddd]\n" + " > Size: 3674\n" + " > Cell num: 4\n" + " > Timestamp range: [1, 1]\n" + " > Format version: " + ver + "\n" + " > Has consolidated metadata: no\n"; From 045beb49f1945128253032b643928ed5ef560288 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 11:15:19 -0400 Subject: [PATCH 46/53] Fix interleaving triples output --- test/src/unit-fragment-info-global-order-bounds.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index 3a677dd054f..cd3bfd25f90 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -1362,8 +1362,8 @@ TEST_CASE( CHECK( triwise.bounds_ == ArrayBounds{ - {tile(1, 18), tile(19, 41), tile(42, 58)}, - {tile(4, 21), tile(22, 44), tile(45, 61)}, + {tile(1, 18), tile(19, 41), tile(42, 59)}, + {tile(4, 21), tile(22, 44), tile(45, 62)}, {tile(7, 32), tile(39, 64)}}); const auto ninewise = assert_consolidate_n_wise_bounds< From 0e4d96069308293711b45913f75b6b114d3daee8 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 12:50:46 -0400 Subject: [PATCH 47/53] Fix cpp context lifetime issue --- test/src/unit-sparse-global-order-reader.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/src/unit-sparse-global-order-reader.cc b/test/src/unit-sparse-global-order-reader.cc index 11c363b65b0..ba6aebbdc3d 100644 --- a/test/src/unit-sparse-global-order-reader.cc +++ b/test/src/unit-sparse-global-order-reader.cc @@ -723,7 +723,8 @@ void CSparseGlobalOrderFx::write_fragment( } CApiArray& array = *existing; - Array cpparray(vfs_test_setup_.ctx(), array, false); + Context cppctx = vfs_test_setup_.ctx(); + Array cpparray(cppctx, array, false); templates::query::write_fragment( fragment, cpparray, TILEDB_UNORDERED); From 4d08b0cc5cf0187bee0975113f9421f9cb157222 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 13:53:08 -0400 Subject: [PATCH 48/53] constexpr UntypedDatumView methods/constructors --- tiledb/common/types/untyped_datum.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tiledb/common/types/untyped_datum.h b/tiledb/common/types/untyped_datum.h index 1f61e681ea7..51ee98a0f7c 100644 --- a/tiledb/common/types/untyped_datum.h +++ b/tiledb/common/types/untyped_datum.h @@ -41,23 +41,23 @@ class UntypedDatumView { size_t datum_size_; public: - UntypedDatumView(const void* content, size_t size) + constexpr UntypedDatumView(const void* content, size_t size) : datum_content_(content) , datum_size_(size) { } - UntypedDatumView(std::string_view ss) + constexpr UntypedDatumView(std::string_view ss) : datum_content_(ss.data()) , datum_size_(ss.size()) { } - [[nodiscard]] inline const void* content() const { + [[nodiscard]] constexpr inline const void* content() const { return datum_content_; } - [[nodiscard]] inline size_t size() const { + [[nodiscard]] constexpr inline size_t size() const { return datum_size_; } template - [[nodiscard]] inline const T& value_as() const { + [[nodiscard]] constexpr inline const T& value_as() const { return *static_cast(datum_content_); } }; From 5886af90f8a44114f477d41510f9e93610133c13 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 13:53:21 -0400 Subject: [PATCH 49/53] Fix cppapi impl dimension_sizes --- tiledb/sm/cpp_api/fragment_info.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/tiledb/sm/cpp_api/fragment_info.h b/tiledb/sm/cpp_api/fragment_info.h index d18670d3d7f..61fcfbf1c00 100644 --- a/tiledb/sm/cpp_api/fragment_info.h +++ b/tiledb/sm/cpp_api/fragment_info.h @@ -291,7 +291,7 @@ class FragmentInfo { */ std::vector> global_order_lower_bound( uint32_t fid, uint32_t mid) const { - std::vector dimension_sizes; + std::vector dimension_sizes; std::vector dimension_ptrs; dimension_sizes.resize(array_schema(fid).domain().ndim()); dimension_ptrs.resize(dimension_sizes.size()); @@ -337,7 +337,7 @@ class FragmentInfo { */ std::vector> global_order_upper_bound( uint32_t fid, uint32_t mid) const { - std::vector dimension_sizes; + std::vector dimension_sizes; std::vector dimension_ptrs; dimension_sizes.resize(array_schema(fid).domain().ndim()); dimension_ptrs.resize(dimension_sizes.size()); From 804051e7b12811c5b5973401aa4f2728925a93aa Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Tue, 23 Sep 2025 14:36:55 -0400 Subject: [PATCH 50/53] std::min explicit template to make mac compiler happy --- test/src/unit-fragment-info-global-order-bounds.cc | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/test/src/unit-fragment-info-global-order-bounds.cc b/test/src/unit-fragment-info-global-order-bounds.cc index cd3bfd25f90..abf1a08b818 100644 --- a/test/src/unit-fragment-info-global-order-bounds.cc +++ b/test/src/unit-fragment-info-global-order-bounds.cc @@ -273,7 +273,8 @@ std::vector>> assert_written_bounds( for (size_t t = 0; t < num_tiles; t++) { const uint64_t lbi = t * tile_stride; - const uint64_t ubi = std::min((t + 1) * tile_stride, fragment.size()) - 1; + const uint64_t ubi = + std::min((t + 1) * tile_stride, fragment.size()) - 1; const auto lbexpect = tuple_index(fragment.dimensions(), lbi); const auto ubexpect = tuple_index(fragment.dimensions(), ubi); @@ -1080,7 +1081,8 @@ std::vector>> consolidate_n_wise( } for (uint32_t f = 0; f < s_fragment_uris.size(); f += fan_in) { std::vector fragment_uris; - for (uint32_t ff = f; ff < std::min(f + fan_in, s_fragment_uris.size()); + for (uint64_t ff = f; + ff < std::min(f + fan_in, s_fragment_uris.size()); ff++) { fragment_uris.push_back(s_fragment_uris[ff].c_str()); } @@ -1113,7 +1115,8 @@ ConsolidateOutput assert_consolidate_n_wise_bounds( std::vector output_fragments; for (uint64_t f = 0; f < input_fragment_data.size(); f += fan_in) { F output_fragment; - for (uint64_t ff = f; ff < std::min(f + fan_in, input_fragment_data.size()); + for (uint64_t ff = f; + ff < std::min(f + fan_in, input_fragment_data.size()); ff++) { output_fragment.extend(input_fragment_data[ff]); } @@ -1132,7 +1135,9 @@ ConsolidateOutput assert_consolidate_n_wise_bounds( for (size_t t = 0; t < num_tiles; t++) { const uint64_t lbi = t * tile_stride; const uint64_t ubi = - std::min((t + 1) * tile_stride, output_fragments[f].size()) - 1; + std::min( + (t + 1) * tile_stride, output_fragments[f].size()) - + 1; const auto lbexpect = tuple_index(output_fragments[f].dimensions(), lbi); const auto ubexpect = tuple_index(output_fragments[f].dimensions(), ubi); From 3c316af8f844ed515b98f63d3ab329c0027ebdd8 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Mon, 13 Oct 2025 12:47:34 -0400 Subject: [PATCH 51/53] Fix format spec typos and version number --- format_spec/FORMAT_SPEC.md | 2 +- format_spec/array_format_history.md | 6 ++++++ format_spec/fragment.md | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/format_spec/FORMAT_SPEC.md b/format_spec/FORMAT_SPEC.md index ed952a9dbbf..c7356bfb8df 100644 --- 
a/format_spec/FORMAT_SPEC.md +++ b/format_spec/FORMAT_SPEC.md @@ -4,7 +4,7 @@ title: Format Specification **Notes:** -* The current TileDB array format version number is **22** (`uint32_t`). +* The current TileDB array format version number is **23** (`uint32_t`). * Other structures might be versioned separately. * Data written by TileDB and referenced in this document is **little-endian** with the following exceptions: diff --git a/format_spec/array_format_history.md b/format_spec/array_format_history.md index 8c3f5512ef9..c1fe2909a89 100644 --- a/format_spec/array_format_history.md +++ b/format_spec/array_format_history.md @@ -4,6 +4,12 @@ title: Array format version history # Array Format Version History +## Version 23 + +Introduced in TileDB 2.30 + +* The _Tile global order min/max_ fields were added to [tile metadata](./fragment.md#tile-mins-maxes). These fields contain the minimum and maximum global order coordinate for each tile in the fragment. This metadata can be used to optimize query execution. + ## Version 22 Introduced in TileDB 2.25 diff --git a/format_spec/fragment.md b/format_spec/fragment.md index 110388a1962..3a9d09f2896 100644 --- a/format_spec/fragment.md +++ b/format_spec/fragment.md @@ -282,7 +282,7 @@ The footer is a simple blob \(i.e., _not a generic tile_\) with the following in | Tile maxes offset for attribute/dimension 1 | `uint64_t` | The offset to the generic tile storing the tile maxes for attribute/dimension 1. | | … | … | … | | Tile maxes offset for attribute/dimension N | `uint64_t` | The offset to the generic tile storing the tile maxes for attribute/dimension N | -| Tile global order min coordinates offset for dimension 1 | `uint64_t` | _New in version 23_ For sparse arrays, he offset to the generic tile storing the tile global order mins for dimension 1 +| Tile global order min coordinates offset for dimension 1 | `uint64_t` | _New in version 23_ For sparse arrays, the offset to the generic tile storing the tile global order mins for dimension 1 | … | … | … | | Tile global order min coordinates offset for dimension N | `uint64_t` | _New in version 23_ For sparse arrays, the offset to the generic tile storing the tile global order mins for dimension N | Tile global order max coordinates offset for dimension 1 | `uint64_t` | _New in version 23_ For sparse arrays, the offset to the generic tile storing the tile global order maxes for dimension 1 From 043445991c5343daec381ccd6abf4f3709435432 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Mon, 13 Oct 2025 13:11:21 -0400 Subject: [PATCH 52/53] Separate method for new versioning --- tiledb/sm/fragment/fragment_metadata.cc | 55 +++++++++++++++++++++++-- tiledb/sm/fragment/fragment_metadata.h | 10 ++++- 2 files changed, 59 insertions(+), 6 deletions(-) diff --git a/tiledb/sm/fragment/fragment_metadata.cc b/tiledb/sm/fragment/fragment_metadata.cc index 903cfb6e919..4dfef83a960 100644 --- a/tiledb/sm/fragment/fragment_metadata.cc +++ b/tiledb/sm/fragment/fragment_metadata.cc @@ -2356,8 +2356,10 @@ void FragmentMetadata::load_generic_tile_offsets(Deserializer& deserializer) { load_generic_tile_offsets_v11(deserializer); } else if (version_ >= 12 && version_ < 16) { load_generic_tile_offsets_v12_v15(deserializer); + } else if (version_ >= 16 && version_ < 23) { + load_generic_tile_offsets_v16_v22(deserializer); } else { - load_generic_tile_offsets_v16_or_higher(deserializer); + load_generic_tile_offsets_v23_or_higher(deserializer); } } @@ -2511,7 +2513,7 @@ void 
FragmentMetadata::load_generic_tile_offsets_v12_v15( deserializer.read(); } -void FragmentMetadata::load_generic_tile_offsets_v16_or_higher( +void FragmentMetadata::load_generic_tile_offsets_v16_v22( Deserializer& deserializer) { // Load R-Tree offset gt_offsets_.rtree_ = deserializer.read(); @@ -2542,8 +2544,53 @@ void FragmentMetadata::load_generic_tile_offsets_v16_or_higher( gt_offsets_.tile_max_offsets_.resize(num); deserializer.read(>_offsets_.tile_max_offsets_[0], num * sizeof(uint64_t)); - if (!dense_ && - version_ >= constants::fragment_metadata_global_order_bounds_version) { + // Load offsets for tile sum offsets + gt_offsets_.tile_sum_offsets_.resize(num); + deserializer.read(>_offsets_.tile_sum_offsets_[0], num * sizeof(uint64_t)); + + // Load offsets for tile null count offsets + gt_offsets_.tile_null_count_offsets_.resize(num); + deserializer.read( + >_offsets_.tile_null_count_offsets_[0], num * sizeof(uint64_t)); + + gt_offsets_.fragment_min_max_sum_null_count_offset_ = + deserializer.read(); + + gt_offsets_.processed_conditions_offsets_ = deserializer.read(); +} + +void FragmentMetadata::load_generic_tile_offsets_v23_or_higher( + Deserializer& deserializer) { + // Load R-Tree offset + gt_offsets_.rtree_ = deserializer.read(); + + // Load offsets for tile offsets + auto num = num_dims_and_attrs(); + gt_offsets_.tile_offsets_.resize(num); + deserializer.read(>_offsets_.tile_offsets_[0], num * sizeof(uint64_t)); + + // Load offsets for tile var offsets + gt_offsets_.tile_var_offsets_.resize(num); + deserializer.read(>_offsets_.tile_var_offsets_[0], num * sizeof(uint64_t)); + + // Load offsets for tile var sizes + gt_offsets_.tile_var_sizes_.resize(num); + deserializer.read(>_offsets_.tile_var_sizes_[0], num * sizeof(uint64_t)); + + // Load offsets for tile validity offsets + gt_offsets_.tile_validity_offsets_.resize(num); + deserializer.read( + >_offsets_.tile_validity_offsets_[0], num * sizeof(uint64_t)); + + // Load offsets for tile min offsets + gt_offsets_.tile_min_offsets_.resize(num); + deserializer.read(>_offsets_.tile_min_offsets_[0], num * sizeof(uint64_t)); + + // Load offsets for tile max offsets + gt_offsets_.tile_max_offsets_.resize(num); + deserializer.read(>_offsets_.tile_max_offsets_[0], num * sizeof(uint64_t)); + + if (!dense_) { // Load offsets for the tile global order bounds const auto num_dims = array_schema_->dim_num(); gt_offsets_.tile_global_order_min_offsets_.resize(num_dims); diff --git a/tiledb/sm/fragment/fragment_metadata.h b/tiledb/sm/fragment/fragment_metadata.h index 82034d54043..e7fd531b4e8 100644 --- a/tiledb/sm/fragment/fragment_metadata.h +++ b/tiledb/sm/fragment/fragment_metadata.h @@ -1044,9 +1044,15 @@ class FragmentMetadata { /** * Loads the generic tile offsets from the buffer. Applicable to - * versions 16 or higher. + * versions 16 to 22. */ - void load_generic_tile_offsets_v16_or_higher(Deserializer& deserializer); + void load_generic_tile_offsets_v16_v22(Deserializer& deserializer); + + /** + * Loads the generic tile offsets from the buffer. Applicable to + * versions 23 or higher. + */ + void load_generic_tile_offsets_v23_or_higher(Deserializer& deserializer); /** * Loads the array schema name. 
From 677fa62b5f22cf10a7574c3769d84adcb43be322 Mon Sep 17 00:00:00 2001 From: Ryan Roelke Date: Thu, 6 Nov 2025 14:44:50 -0500 Subject: [PATCH 53/53] Restore Dimension specialization --- test/support/src/array_schema_templates.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/support/src/array_schema_templates.h b/test/support/src/array_schema_templates.h index bd2b77059a8..f1a5503e9ea 100644 --- a/test/support/src/array_schema_templates.h +++ b/test/support/src/array_schema_templates.h @@ -158,6 +158,17 @@ struct Dimension { } }; +template <> +struct Dimension { + using value_type = StringDimensionCoordType; + + static constexpr tiledb::sm::Datatype DATATYPE = + tiledb::sm::Datatype::STRING_ASCII; + + Dimension() { + } +}; + template struct static_attribute {};