Skip to content

Commit 4e33aac

Browse files
committed
(wip) get timing
1 parent 0411e63 commit 4e33aac

File tree

2 files changed

+189
-1
lines changed

2 files changed

+189
-1
lines changed

tests/integration/CMakeLists.txt

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ set(tests
2222
stream-pure-hcs-acquisition
2323
stream-mixed-flat-and-hcs-acquisition
2424
stream-with-ragged-final-shard
25+
get-timing-info
2526
)
2627

2728
foreach (name ${tests})
@@ -54,5 +55,7 @@ foreach (name ${tests})
5455
list(APPEND test_labels "s3")
5556
endif ()
5657

57-
set_tests_properties(test-${tgt} PROPERTIES LABELS "${test_labels}")
58+
if (NOT name MATCHES ".*timing.*")
59+
set_tests_properties(test-${tgt} PROPERTIES LABELS "${test_labels}")
60+
endif ()
5861
endforeach ()
Lines changed: 185 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,185 @@
1+
#include "acquire.zarr.h"
2+
#include "test.macros.hh"
3+
4+
#include <nlohmann/json.hpp>
5+
6+
#include <chrono>
7+
#include <filesystem>
8+
#include <fstream>
9+
#include <iostream>
10+
#include <vector>
11+
12+
namespace fs = std::filesystem;
13+
14+
namespace {

// Width and height, in pixels, of every frame appended to the stream.
constexpr uint32_t frame_size = 2048;

// A single all-zero uint8 frame, reused for every append call.
const std::vector<uint8_t> frame_data(static_cast<size_t>(frame_size) *
                                        frame_size,
                                      0);

// Parameter sweep for the timing runs: chunk edge lengths, chunks per
// shard along each spatial axis, and shard layers along the append axis.
const std::vector<uint32_t> chunk_sizes{ 32, 64, 128, 256 };
const std::vector<uint32_t> chunks_per_shard{ 8, 16, 32, 64 };
const std::vector<uint32_t> layers_per_shard{ 1, 2, 4, 8, 16 };

} // namespace
24+
25+
/// Build a Zarr v3 stream over a 5D (t/c/z/y/x) uint8 array whose chunking
/// and sharding are controlled by the sweep parameters.
///
/// @param chunk_size edge length of each chunk along z, y, and x.
/// @param n_chunks_per_shard chunks per shard along each of z, y, and x.
/// @param n_layers_per_shard chunks per shard along the append (t) axis.
/// @return the created stream, or nullptr on failure (checked by the caller).
ZarrStream*
make_stream(uint32_t chunk_size,
            uint32_t n_chunks_per_shard,
            uint32_t n_layers_per_shard)
{
    // Store rooted at the test's scratch path; overwrite any previous run.
    ZarrStreamSettings settings{ .store_path = TEST ".zarr",
                                 .version = ZarrVersion_3,
                                 .overwrite = true };

    EXPECT(ZarrStreamSettings_create_arrays(&settings, 1) ==
             ZarrStatusCode_Success,
           "Failed to create array settings");
    EXPECT(ZarrArraySettings_create_dimension_array(settings.arrays, 5) ==
             ZarrStatusCode_Success,
           "Failed to create dimension array");

    auto& array = settings.arrays[0];
    array.data_type = ZarrDataType_uint8;

    // t: unbounded append dimension, one frame per chunk,
    // n_layers_per_shard chunks per shard.
    array.dimensions[0] =
      DIM("t", ZarrDimensionType_Time, 0, 1, n_layers_per_shard, nullptr, 1.0);
    // c: single channel.
    array.dimensions[1] =
      DIM("c", ZarrDimensionType_Channel, 1, 1, 1, nullptr, 1.0);
    // z: extent fixed at the largest swept chunk size.
    array.dimensions[2] = DIM("z",
                              ZarrDimensionType_Space,
                              chunk_sizes.back(),
                              chunk_size,
                              n_chunks_per_shard,
                              "millimeter",
                              1.0);
    array.dimensions[3] = DIM("y",
                              ZarrDimensionType_Space,
                              frame_size,
                              chunk_size,
                              n_chunks_per_shard,
                              "micrometer",
                              1.0);
    array.dimensions[4] = DIM("x",
                              ZarrDimensionType_Space,
                              frame_size,
                              chunk_size,
                              n_chunks_per_shard,
                              "micrometer",
                              1.0);

    auto* stream = ZarrStream_create(&settings);

    // The stream owns copies of the settings; release ours either way.
    ZarrStreamSettings_destroy_arrays(&settings);

    return stream;
}
76+
77+
int
78+
main()
79+
{
80+
int retval = 1;
81+
nlohmann::json results_arr = nlohmann::json::array();
82+
ZarrStream* stream = nullptr;
83+
84+
try {
85+
for (auto& layers : layers_per_shard) {
86+
for (auto& cps : chunks_per_shard) {
87+
for (auto& chunk_size : chunk_sizes) {
88+
const size_t chunks_xyz =
89+
(frame_size + chunk_size - 1) / chunk_size;
90+
if (cps > chunks_xyz) {
91+
continue;
92+
}
93+
94+
const auto n_chunks = chunks_xyz * chunks_xyz * chunks_xyz;
95+
const auto n_frames = chunk_size * cps * layers;
96+
97+
nlohmann::json j;
98+
j["chunk_size"] = chunk_size;
99+
j["chunks_per_shard"] = cps;
100+
j["layers_per_shard"] = layers;
101+
j["n_chunks"] = n_chunks;
102+
j["frames_written"] = n_frames;
103+
104+
std::cout
105+
<< "Testing chunk size " << chunk_size
106+
<< ", chunks per shard " << cps << ", layers per shard "
107+
<< layers << ", chunk count " << n_chunks << " ("
108+
<< n_frames << " frames)... " << std::flush << std::endl;
109+
110+
stream = make_stream(chunk_size, cps, layers);
111+
EXPECT(stream != nullptr, "Failed to create stream");
112+
113+
auto start = std::chrono::high_resolution_clock::now();
114+
115+
for (auto i = 0; i < n_frames; ++i) {
116+
size_t bytes_written = 0;
117+
ZarrStatusCode status =
118+
ZarrStream_append(stream,
119+
frame_data.data(),
120+
frame_data.size(),
121+
&bytes_written,
122+
nullptr);
123+
EXPECT(status == ZarrStatusCode_Success,
124+
"Failed to append frame ",
125+
i,
126+
", status code ",
127+
int(status));
128+
EXPECT(bytes_written == frame_data.size(),
129+
"Expected to write ",
130+
frame_data.size(),
131+
" bytes, but wrote ",
132+
bytes_written);
133+
std::cout << "." << std::flush;
134+
}
135+
auto end_append = std::chrono::high_resolution_clock::now();
136+
std::chrono::duration<double> elapsed_append =
137+
end_append - start;
138+
139+
std::cout << "\nFinalizing... " << std::flush;
140+
ZarrStream_destroy(stream);
141+
stream = nullptr;
142+
std::cout << "done." << std::endl;
143+
144+
auto end_destroy =
145+
std::chrono::high_resolution_clock::now();
146+
std::chrono::duration<double> elapsed_destroy =
147+
end_destroy - start;
148+
149+
const double fps = n_frames / elapsed_append.count();
150+
151+
j["elapsed_time_append"] = elapsed_append.count();
152+
j["elapsed_time_destroy"] = elapsed_destroy.count();
153+
154+
std::cout
155+
<< "Wrote " << n_frames << " frames in "
156+
<< elapsed_append.count() << " seconds (" << fps
157+
<< " fps); time to destroy: " << elapsed_destroy.count()
158+
<< " seconds" << std::endl;
159+
160+
results_arr.push_back(j);
161+
}
162+
}
163+
}
164+
165+
retval = 0;
166+
} catch (const std::exception& err) {
167+
LOG_ERROR("Failed: ", err.what());
168+
}
169+
170+
// write out results to file
171+
std::ofstream results_file(TEST "-timing-results.json");
172+
results_file << results_arr.dump(2) << "\n";
173+
results_file.close();
174+
175+
// cleanup
176+
if (stream != nullptr) {
177+
ZarrStream_destroy(stream);
178+
}
179+
180+
if (fs::exists(TEST ".zarr")) {
181+
fs::remove_all(TEST ".zarr");
182+
}
183+
184+
return retval;
185+
}

0 commit comments

Comments
 (0)