Skip to content

Commit 74e775a

Browse files
authored
Revert "improve readers by parallelizing I/O and compute operations" (#5471)
This reverts #5401 for parallelizing I/O and compute operations (backported in #5451) and the follow-up PR #5446 (backported in #5458). --- TYPE: IMPROVEMENT DESC: Revert improve readers by parallelizing I/O and compute operations
1 parent c1428f9 commit 74e775a

23 files changed

+144
-420
lines changed

test/src/unit-ReadCellSlabIter.cc

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -183,10 +183,7 @@ void set_result_tile_dim(
183183
std::nullopt,
184184
std::nullopt,
185185
std::nullopt);
186-
ResultTile::TileData tile_data{
187-
{nullptr, ThreadPool::SharedTask()},
188-
{nullptr, ThreadPool::SharedTask()},
189-
{nullptr, ThreadPool::SharedTask()}};
186+
ResultTile::TileData tile_data{nullptr, nullptr, nullptr};
190187
result_tile.init_coord_tile(
191188
constants::format_version,
192189
array_schema,

test/src/unit-cppapi-consolidation-with-timestamps.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -636,7 +636,7 @@ TEST_CASE_METHOD(
636636

637637
// Will only allow to load two tiles out of 3.
638638
Config cfg;
639-
cfg.set("sm.mem.total_budget", "50000");
639+
cfg.set("sm.mem.total_budget", "30000");
640640
cfg.set("sm.mem.reader.sparse_global_order.ratio_coords", "0.15");
641641
ctx_ = Context(cfg);
642642

@@ -685,7 +685,7 @@ TEST_CASE_METHOD(
685685

686686
// Will only allow to load two tiles out of 3.
687687
Config cfg;
688-
cfg.set("sm.mem.total_budget", "50000");
688+
cfg.set("sm.mem.total_budget", "30000");
689689
cfg.set("sm.mem.reader.sparse_global_order.ratio_coords", "0.15");
690690
ctx_ = Context(cfg);
691691

test/src/unit-result-tile.cc

Lines changed: 4 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -213,10 +213,7 @@ TEST_CASE_METHOD(
213213
0,
214214
std::nullopt,
215215
std::nullopt);
216-
ResultTile::TileData tile_data{
217-
{nullptr, ThreadPool::SharedTask()},
218-
{nullptr, ThreadPool::SharedTask()},
219-
{nullptr, ThreadPool::SharedTask()}};
216+
ResultTile::TileData tile_data{nullptr, nullptr, nullptr};
220217
rt.init_coord_tile(
221218
constants::format_version,
222219
array_schema,
@@ -233,10 +230,7 @@ TEST_CASE_METHOD(
233230
0,
234231
std::nullopt,
235232
std::nullopt);
236-
ResultTile::TileData tile_data{
237-
{nullptr, ThreadPool::SharedTask()},
238-
{nullptr, ThreadPool::SharedTask()},
239-
{nullptr, ThreadPool::SharedTask()}};
233+
ResultTile::TileData tile_data{nullptr, nullptr, nullptr};
240234
rt.init_coord_tile(
241235
constants::format_version,
242236
array_schema,
@@ -332,10 +326,7 @@ TEST_CASE_METHOD(
332326
0,
333327
std::nullopt,
334328
std::nullopt);
335-
ResultTile::TileData tile_data{
336-
{nullptr, ThreadPool::SharedTask()},
337-
{nullptr, ThreadPool::SharedTask()},
338-
{nullptr, ThreadPool::SharedTask()}};
329+
ResultTile::TileData tile_data{nullptr, nullptr, nullptr};
339330
rt.init_coord_tile(
340331
constants::format_version,
341332
array_schema,
@@ -352,10 +343,7 @@ TEST_CASE_METHOD(
352343
0,
353344
std::nullopt,
354345
std::nullopt);
355-
ResultTile::TileData tile_data{
356-
{nullptr, ThreadPool::SharedTask()},
357-
{nullptr, ThreadPool::SharedTask()},
358-
{nullptr, ThreadPool::SharedTask()}};
346+
ResultTile::TileData tile_data{nullptr, nullptr, nullptr};
359347
rt.init_coord_tile(
360348
constants::format_version,
361349
array_schema,

test/src/unit-sparse-global-order-reader.cc

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2224,10 +2224,9 @@ TEST_CASE_METHOD(
22242224
}
22252225

22262226
// FIXME: there is no per fragment budget anymore
2227-
// Two result tiles (2 * (2842 + 8)) = 5700 will be bigger than the per
2228-
// fragment budget (50000 * 0.11 / 2 fragments = 2750), so only one result
2229-
// tile will be loaded each time.
2230-
memory_.total_budget_ = "60000";
2227+
// Two result tiles (2 * (~3000 + 8)) will be bigger than the per fragment
2228+
// budget (1000).
2229+
memory_.total_budget_ = "35000";
22312230
memory_.ratio_coords_ = "0.11";
22322231
update_config();
22332232

@@ -2750,9 +2749,8 @@ TEST_CASE_METHOD(
27502749
}
27512750

27522751
// FIXME: there is no per fragment budget anymore
2753-
// Two result tiles (2 * (2842 + 8)) = 5700 will be bigger than the per
2754-
// fragment budget (40000 * 0.22 /2 frag = 4400), so only one will be loaded
2755-
// each time.
2752+
// Two result tiles (2 * (~4000 + 8)) will be bigger than the per fragment
2753+
// budget (1000).
27562754
memory_.total_budget_ = "40000";
27572755
memory_.ratio_coords_ = "0.22";
27582756
update_config();

test/src/unit-sparse-unordered-with-dups-reader.cc

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1064,12 +1064,9 @@ TEST_CASE_METHOD(
10641064

10651065
if (one_frag) {
10661066
CHECK(1 == loop_num->second);
1067+
} else {
1068+
CHECK(9 == loop_num->second);
10671069
}
1068-
/**
1069-
* We can't do a similar check for multiple fragments as it is architecture
1070-
* dependent how many tiles fit in the memory budget. And thus also
1071-
* architecture dependent as to how many internal loops we have.
1072-
*/
10731070

10741071
// Try to read multiple frags without partial tile offset reading. Should
10751072
// fail

tiledb/sm/filter/compression_filter.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -636,7 +636,7 @@ Status CompressionFilter::decompress_var_string_coords(
636636
auto output_view = span<std::byte>(
637637
reinterpret_cast<std::byte*>(output_buffer->data()), uncompressed_size);
638638
auto offsets_view = span<uint64_t>(
639-
offsets_tile->data_as_unsafe<offsets_t>(), uncompressed_offsets_size);
639+
offsets_tile->data_as<offsets_t>(), uncompressed_offsets_size);
640640

641641
if (compressor_ == Compressor::RLE) {
642642
uint8_t rle_len_bytesize, string_len_bytesize;

tiledb/sm/filter/filter_pipeline.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -461,7 +461,7 @@ Status FilterPipeline::run_reverse(
461461
// If the pipeline is empty, just copy input to output.
462462
if (filters_.empty()) {
463463
void* output_chunk_buffer =
464-
tile->data_as_unsafe<char>() + chunk_data.chunk_offsets_[i];
464+
tile->data_as<char>() + chunk_data.chunk_offsets_[i];
465465
RETURN_NOT_OK(input_data.copy_to(output_chunk_buffer));
466466
continue;
467467
}
@@ -484,7 +484,7 @@ Status FilterPipeline::run_reverse(
484484
bool last_filter = filter_idx == 0;
485485
if (last_filter) {
486486
void* output_chunk_buffer =
487-
tile->data_as_unsafe<char>() + chunk_data.chunk_offsets_[i];
487+
tile->data_as<char>() + chunk_data.chunk_offsets_[i];
488488
RETURN_NOT_OK(output_data.set_fixed_allocation(
489489
output_chunk_buffer, chunk.unfiltered_data_size_));
490490
reader_stats->add_counter(

tiledb/sm/filter/test/filter_test_support.cc

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -203,8 +203,7 @@ Tile create_tile_for_unfiltering(
203203
tile->cell_size() * nelts,
204204
tile->filtered_buffer().data(),
205205
tile->filtered_buffer().size(),
206-
tracker,
207-
std::nullopt};
206+
tracker};
208207
}
209208

210209
void run_reverse(

tiledb/sm/filter/test/tile_data_generator.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -99,8 +99,7 @@ class TileDataGenerator {
9999
original_tile_size(),
100100
filtered_buffer.data(),
101101
filtered_buffer.size(),
102-
memory_tracker,
103-
std::nullopt);
102+
memory_tracker);
104103
}
105104

106105
/** Returns the size of the original unfiltered data. */

tiledb/sm/metadata/test/unit_metadata.cc

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -123,8 +123,7 @@ TEST_CASE(
123123
tile1->size(),
124124
tile1->filtered_buffer().data(),
125125
tile1->filtered_buffer().size(),
126-
tracker,
127-
ThreadPool::SharedTask());
126+
tracker);
128127
memcpy(metadata_tiles[0]->data(), tile1->data(), tile1->size());
129128

130129
metadata_tiles[1] = tdb::make_shared<Tile>(
@@ -136,8 +135,7 @@ TEST_CASE(
136135
tile2->size(),
137136
tile2->filtered_buffer().data(),
138137
tile2->filtered_buffer().size(),
139-
tracker,
140-
ThreadPool::SharedTask());
138+
tracker);
141139
memcpy(metadata_tiles[1]->data(), tile2->data(), tile2->size());
142140

143141
metadata_tiles[2] = tdb::make_shared<Tile>(
@@ -149,8 +147,7 @@ TEST_CASE(
149147
tile3->size(),
150148
tile3->filtered_buffer().data(),
151149
tile3->filtered_buffer().size(),
152-
tracker,
153-
ThreadPool::SharedTask());
150+
tracker);
154151
memcpy(metadata_tiles[2]->data(), tile3->data(), tile3->size());
155152

156153
meta = Metadata::deserialize(metadata_tiles);

0 commit comments

Comments (0)