Skip to content

Commit 58ba094

Browse files
committed
Add vvd writing support to convert binary
1 parent bf35a9a commit 58ba094

File tree

7 files changed

+218
-7
lines changed

7 files changed

+218
-7
lines changed

Cargo.lock

Lines changed: 5 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

convert/Cargo.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,5 +9,6 @@ palace-core = { path = "../palace-core" }
99
palace-io = { path = "../palace-io" }
1010
palace-zarr = { path = "../palace-zarr" }
1111
palace-hdf5 = { path = "../palace-hdf5" }
12+
palace-vvd = { path = "../palace-vvd" }
1213
clap = { version = "4.0", features = ["derive"] }
1314
bytesize = "1.3.0"

convert/src/main.rs

Lines changed: 24 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,10 @@ use palace_core::{
55
dim::{DDyn, DynDimension},
66
dtypes::{DType, ScalarType},
77
operators::{
8-
procedural, rechunk::ChunkSize, resample::DownsampleStep, tensor::LODTensorOperator,
8+
procedural,
9+
rechunk::ChunkSize,
10+
resample::DownsampleStep,
11+
tensor::{EmbeddedTensorOperator, LODTensorOperator},
912
},
1013
runtime::RunTime,
1114
vec::Vector,
@@ -26,6 +29,7 @@ enum FileMode {
2629
enum FileFormat {
2730
Zarr,
2831
HDF5,
32+
VVD,
2933
}
3034

3135
#[derive(Parser)]
@@ -123,6 +127,7 @@ fn parse_file_type(path: &Path) -> Result<FileFormat, String> {
123127
match segments[..] {
124128
[.., "zarr"] | [.., "zarr", "zip"] => Ok(FileFormat::Zarr),
125129
[.., "h5"] | [.., "hdf5"] => Ok(FileFormat::HDF5),
130+
[.., "vvd"] => Ok(FileFormat::VVD),
126131
_ => Err(format!("Unknown file format in file {}", path).into()),
127132
}
128133
}
@@ -311,6 +316,24 @@ fn main() {
311316
}
312317
}
313318
}
319+
FileFormat::VVD => match args.mode {
320+
FileMode::Single => {
321+
let v: EmbeddedTensorOperator<palace_core::dim::D3, _> = input
322+
.clone()
323+
.try_into_static()
324+
.ok_or_else(|| format!("vvd format only supports volumes (3D tensors)."))
325+
.unwrap();
326+
let v = &v;
327+
runtime.resolve(None, false, |ctx, _| {
328+
async move { palace_vvd::save_embedded_tensor(ctx, &args.output_path, v).await }
329+
.into()
330+
})
331+
}
332+
333+
FileMode::Lod { .. } => {
334+
panic!("vvd format does not support lod volumes.");
335+
}
336+
},
314337
}
315338
.unwrap();
316339
}

palace-core/src/data.rs

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,9 @@ use crate::dim::*;
88
pub use crate::mat::*;
99
pub use crate::vec::*;
1010

11-
fn dimension_order_stride<D: DynDimension, T: CoordinateType>(
11+
pub fn dimension_order_stride<D: DynDimension, T: CoordinateType>(
1212
mem_size: &Vector<D, Coordinate<T>>,
13-
) -> D::NDArrayDimDyn {
13+
) -> Vector<D, usize> {
1414
let nd = mem_size.len();
1515
let mem_size = mem_size.as_index();
1616
let mut out = Vector::<D, usize>::fill_with_len(1usize, nd);
@@ -19,7 +19,7 @@ fn dimension_order_stride<D: DynDimension, T: CoordinateType>(
1919
out[i] = rol;
2020
rol *= mem_size[i];
2121
}
22-
D::to_ndarray_dim_dyn(out.inner())
22+
out
2323
}
2424
pub fn contiguous_shape<D: DynDimension, T: CoordinateType>(
2525
size: &Vector<D, Coordinate<T>>,
@@ -32,6 +32,7 @@ pub fn stride_shape<D: DynDimension, T: CoordinateType>(
3232
) -> ndarray::StrideShape<D::NDArrayDimDyn> {
3333
use ndarray::ShapeBuilder;
3434
let stride = dimension_order_stride(mem_size);
35+
let stride = D::to_ndarray_dim_dyn(stride.inner());
3536

3637
let size: ndarray::Shape<D::NDArrayDimDyn> = size.to_ndarray_dim().into();
3738
size.strides(stride)

palace-core/src/vec.rs

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -754,6 +754,9 @@ impl<D: DynDimension, T: CoordinateType> Vector<D, Coordinate<T>> {
754754
pub fn hmul(&self) -> usize {
755755
self.iter().map(|v| v.raw as usize).product()
756756
}
757+
pub fn hadd(&self) -> usize {
758+
self.iter().map(|v| v.raw as usize).sum()
759+
}
757760
}
758761
impl<D: DynDimension> Vector<D, u32> {
759762
pub fn hmul(&self) -> usize {

palace-vvd/Cargo.toml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,3 +10,7 @@ license = "MPL-2.0"
1010
palace-core = { path = "../palace-core" }
1111
sxd-xpath = "0.4"
1212
sxd-document = "0.3"
13+
memmap = "0.7.0"
14+
ndarray = "0.16"
15+
futures = "0.3.30"
16+
itertools = "0.13.0"

palace-vvd/src/lib.rs

Lines changed: 177 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,22 @@
1+
use futures::StreamExt;
2+
use itertools::Itertools;
3+
use ndarray::ShapeBuilder;
14
use sxd_document::*;
25
use sxd_xpath::evaluate_xpath;
36

47
use std::path::{Path, PathBuf};
58

69
use palace_core::{
710
array::{TensorEmbeddingData, VolumeMetaData},
8-
data::{Vector, VoxelPosition},
11+
data::{GlobalCoordinate, Vector, VoxelPosition},
912
dim::*,
10-
dtypes::{DType, ScalarType},
13+
dtypes::{DType, ElementType, ScalarType},
1114
jit::jit,
1215
operators::{
1316
rechunk::ChunkSize,
14-
tensor::{EmbeddedVolumeOperator, TensorOperator},
17+
tensor::{EmbeddedTensorOperator, EmbeddedVolumeOperator, TensorOperator},
1518
},
19+
task::{OpaqueTaskContext, RequestStream},
1620
transfunc::TransFuncOperator,
1721
Error,
1822
};
@@ -218,3 +222,173 @@ pub fn load_tfi(path: &Path) -> Result<TransFuncOperator, Error> {
218222
},
219223
))
220224
}
225+
226+
fn write_vvd(
227+
out: &mut dyn std::io::Write,
228+
raw_file: &str,
229+
dtype: DType,
230+
dimensions: Vector<D3, GlobalCoordinate>,
231+
spacing: Vector<D3, f32>,
232+
) -> Result<(), palace_core::Error> {
233+
if dtype.size != 1 {
234+
return Err(format!(
235+
"Only scalar dtypes are supported, but dtype with size {} given",
236+
dtype.size
237+
)
238+
.into());
239+
}
240+
let format = {
241+
let this = &dtype.scalar;
242+
match this {
243+
ScalarType::U8 => "uint8",
244+
ScalarType::I8 => "int8",
245+
ScalarType::U16 => "uint16",
246+
ScalarType::I16 => "int16",
247+
ScalarType::U32 => "uint32",
248+
ScalarType::I32 => "int32",
249+
ScalarType::U64 => "uint64",
250+
ScalarType::I64 => "int64",
251+
ScalarType::F32 => "float",
252+
}
253+
};
254+
255+
let dim = dimensions.raw();
256+
257+
write!(
258+
out,
259+
r#"<?xml version="1.0" ?>
260+
<VoreenData version="1">
261+
<Volumes>
262+
<Volume>
263+
<RawData format="{format}" x="{dim_x}" y="{dim_y}" z="{dim_z}">
264+
<Paths noPathSet="false">
265+
<paths>
266+
<item value="{raw_file}" />
267+
</paths>
268+
</Paths>
269+
</RawData>
270+
<MetaData>
271+
<MetaItem name="Offset" type="Vec3MetaData">
272+
<value x="0" y="0" z="0" />
273+
</MetaItem>
274+
<MetaItem name="Spacing" type="Vec3MetaData">
275+
<value x="{spacing_x}" y="{spacing_y}" z="{spacing_z}" />
276+
</MetaItem>
277+
</MetaData>
278+
</Volume>
279+
</Volumes>
280+
</VoreenData>"#,
281+
format = format,
282+
dim_x = dim.x(),
283+
dim_y = dim.y(),
284+
dim_z = dim.z(),
285+
spacing_x = spacing.x(),
286+
spacing_y = spacing.y(),
287+
spacing_z = spacing.z(),
288+
)?;
289+
290+
Ok(())
291+
}
292+
293+
/// Saves a 3D tensor as a Voreen volume: a `.vvd` XML descriptor at `path`
/// plus a sibling `.raw` file (same stem) holding the voxel payload.
///
/// The raw file is memory-mapped and chunks of `t` are requested in batches
/// and copied into their final position in parallel on compute threads.
///
/// # Errors
///
/// Propagates I/O errors from creating/mapping the output files and errors
/// from `write_vvd` (e.g. multi-channel dtypes).
pub async fn save_embedded_tensor<'cref, 'inv>(
    ctx: OpaqueTaskContext<'cref, 'inv>,
    path: &Path,
    t: &'inv EmbeddedTensorOperator<D3, DType>,
) -> Result<(), palace_core::Error> {
    // The payload lives next to the descriptor, with a `.raw` extension.
    let raw_path = path.with_extension("raw");
    let mut vvd_out = std::fs::File::options()
        .truncate(true)
        .write(true)
        .create(true)
        .open(path)?;

    // The descriptor references the raw file by name (not full path), so the
    // pair stays valid when the directory is moved.
    // NOTE(review): `unwrap()` here assumes `path` has a file name and is
    // valid UTF-8 — a caller-supplied non-UTF-8 path would panic.
    let raw_file_name = raw_path.file_name().unwrap();
    let raw_file_name = raw_file_name.to_str().unwrap();
    write_vvd(
        &mut vvd_out,
        raw_file_name,
        t.dtype(),
        t.metadata.dimensions,
        t.embedding_data.spacing,
    )?;

    // Pre-size the raw file to its final length so it can be written through
    // a mutable memory mapping.
    let file_size = t
        .dtype()
        .array_layout(t.metadata.num_tensor_elements())
        .size();
    let mut out_file = std::fs::File::options()
        .truncate(true)
        .write(true)
        .read(true)
        .create(true)
        .open(raw_path)?;
    out_file.set_len(file_size as u64)?;
    // SAFETY: the file was just created/truncated and sized above; the map is
    // dropped before this function returns. This still assumes no other
    // process resizes the file while it is mapped.
    let mut out_file = unsafe { memmap::MmapMut::map_mut(&mut out_file) }?;
    let out_file_ptr = out_file.as_mut_ptr();

    // Append the per-element byte count as an innermost dimension so the
    // computed strides are byte strides into the mapping.
    let num_dtype_bytes = t.dtype().element_layout().size();
    let out_dim = t
        .metadata
        .dimensions
        .push_dim_small(num_dtype_bytes.try_into().unwrap());
    let stride = palace_core::data::dimension_order_stride(&out_dim);

    // 4D metadata view of the tensor with the byte dimension appended to both
    // dimensions and chunk size (chunk size 1 byte-chunk per element).
    let md = &t.metadata.push_dim_small(
        num_dtype_bytes.try_into().unwrap(),
        num_dtype_bytes.try_into().unwrap(),
    );

    let num_total = md.dimension_in_chunks().hmul();
    println!("{} chunks to save", num_total);

    // Request chunks in batches to bound the number of in-flight requests.
    let request_chunk_size = 1024;
    let chunk_ids_in_parts = md.chunk_indices().chunks(request_chunk_size);
    let mut i = 0;
    for chunk_ids in &chunk_ids_in_parts {
        let requests = chunk_ids.map(|chunk_id| (t.chunks.request_raw(chunk_id), chunk_id));
        let stream =
            ctx.submit_unordered_with_data(requests)
                .then_req(ctx, |(chunk_handle, chunk_id)| {
                    let chunk_info = md.chunk_info(chunk_id);

                    // Byte offset of this chunk's origin within the mapping:
                    // dot product of the chunk begin and the byte strides.
                    let begin = chunk_info.begin();
                    let start_offset = (*begin * stride).hadd();

                    // SAFETY: the offset is derived from the same metadata
                    // the file size was computed from, so it stays within the
                    // mapping.
                    let start_ptr =
                        unsafe { out_file_ptr.offset(start_offset.try_into().unwrap()) };

                    let stride = D4::to_ndarray_dim_dyn(stride.inner());

                    // Strided view covering only the logical (non-padded)
                    // extent of the chunk inside the full volume.
                    let size: ndarray::Shape<
                        <palace_core::dim::D4 as palace_core::dim::DynDimension>::NDArrayDimDyn,
                    > = chunk_info.logical_dimensions.to_ndarray_dim().into();
                    let shape = size.strides(stride);

                    // SAFETY: distinct chunk ids map to disjoint regions of
                    // the file, so concurrently live views do not alias —
                    // presumes `chunk_info` regions are in fact disjoint.
                    let mut chunk_view_out =
                        unsafe { ndarray::ArrayViewMut::from_shape_ptr(shape, start_ptr) };

                    // Hand the chunk to a compute thread for the actual copy.
                    let chunk_handle = chunk_handle.into_thread_handle();
                    ctx.spawn_compute(move || {
                        let chunk_view_in =
                            palace_core::data::chunk(chunk_handle.data(), &chunk_info);

                        chunk_view_out.assign(&chunk_view_in);

                        chunk_handle
                    })
                });
        futures::pin_mut!(stream);
        // Drain the batch; returning handles to the main thread releases them.
        while let Some(handle) = stream.next().await {
            let _handle = handle.into_main_handle(ctx.storage());
        }
        i += request_chunk_size;
        // NOTE(review): on the final (partial) batch `i` can exceed
        // `num_total`, so the reported progress may overshoot 100%.
        println!(
            "{}/{}, {}%",
            i,
            num_total,
            i as f32 / num_total as f32 * 100.0
        );
    }

    Ok(())
}

0 commit comments

Comments
 (0)