diff --git a/.coverage b/.coverage
index c6fe2e9..6515a9c 100644
Binary files a/.coverage and b/.coverage differ
diff --git a/python/Dockerfile b/python/Dockerfile
index fac80f8..c409c5a 100644
--- a/python/Dockerfile
+++ b/python/Dockerfile
@@ -8,7 +8,8 @@ ENV POETRY_NO_INTERACTION=1 \
 RUN apt-get update -y
 RUN apt-get install \
     gcc \
-    libpq-dev
+    libpq-dev \
+    cargo
 
 WORKDIR /app
diff --git a/python/Dockerfile-prod b/python/Dockerfile-prod
index f76ee15..0e55fd7 100644
--- a/python/Dockerfile-prod
+++ b/python/Dockerfile-prod
@@ -7,7 +7,8 @@ FROM thehale/python-poetry:2.1.3-py3.11-slim as python-base
 RUN apt-get update -y
 RUN apt-get install -y \
     gcc \
-    libpq-dev
+    libpq-dev \
+    cargo
 
 COPY ./dist/*.whl ./
diff --git a/python/ouroboros/helpers/files.py b/python/ouroboros/helpers/files.py
index a8cb17e..2ae0de5 100644
--- a/python/ouroboros/helpers/files.py
+++ b/python/ouroboros/helpers/files.py
@@ -1,4 +1,5 @@
 from functools import partial
+from io import BytesIO
 from multiprocessing.pool import ThreadPool
 import os
@@ -6,7 +7,6 @@
 from numpy.typing import ArrayLike
 from pathlib import Path
 import cv2
-from tifffile import TiffWriter, TiffFile
 import time
 
 from .shapes import DataShape
@@ -171,60 +171,60 @@ def generate_tiff_write(write_func: callable, compression: str | None, micron_re
                         **kwargs)
 
 
-def write_small_intermediate(file_path: os.PathLike, *series):
-    with TiffWriter(file_path, append=True) as tif:
-        for entry in series:
-            tif.write(entry, dtype=entry.dtype)
+def write_raw_intermediate(target: BytesIO, *series):
+    for entry in series:
+        target.write(entry)
+    return target.tell()
 
 
 def ravel_map_2d(index, source_rows, target_rows, offset):
     return np.add.reduce(np.add(np.divmod(index, source_rows), offset) * ((target_rows, ), (np.uint32(1), )))
 
 
-def load_z_intermediate(path: Path, offset: int = 0):
-    with TiffFile(path) as tif:
-        meta = tif.series[offset].asarray()
-        source_rows, target_rows, offset_rows, offset_columns = meta
-        return (ravel_map_2d(tif.series[offset + 1].asarray(),
-                             source_rows, target_rows,
-                             ((offset_rows, ), (offset_columns, ))),
-                tif.series[offset + 2].asarray(),
-                tif.series[offset + 3].asarray())
+def load_raw_file_intermediate(handle: BytesIO):
+    meta = np.fromfile(handle, np.uint32, 6)
+    source_rows, target_rows, offset_rows, offset_columns, channel_count, data_length = meta
+    t_index, t_value, t_weight = [np.dtype(code.decode()).type for code in np.fromfile(handle, 'S8', 3)]
+    return (ravel_map_2d(np.fromfile(handle, t_index, data_length),
+                         source_rows, target_rows,
+                         ((offset_rows, ), (offset_columns, ))),
+            np.fromfile(handle, t_value, data_length * channel_count).reshape(-1, data_length),
+            np.fromfile(handle, t_weight, data_length))
 
 
 def increment_volume(path: Path, vol: np.ndarray, offset: int = 0, cleanup=False):
-    indicies, values, weights = load_z_intermediate(path, offset)
-    for i in range(0, vol.shape[0] - 1):
-        np.add.at(vol[i], indicies, np.atleast_2d(values)[i])
-    np.add.at(vol[-1], indicies, weights)
+    if isinstance(path, Path):
+        with open(path, "rb") as handle:
+            end = os.fstat(handle.fileno()).st_size
+            handle.seek(offset)
+            while handle.tell() < end:
+                indicies, values, weights = load_raw_file_intermediate(handle)
+                for i in range(0, vol.shape[0] - 1):
+                    np.add.at(vol[i], indicies, np.atleast_2d(values)[i])
+                np.add.at(vol[-1], indicies, weights)
 
     if cleanup:
         path.unlink()
 
 
 def volume_from_intermediates(path: Path, shape: DataShape, thread_count: int = 4):
-    vol = np.zeros((1 + shape.C, np.prod((shape.Y, shape.X))), dtype=np.float32)
-    with ThreadPool(thread_count) as pool:
-        if not path.exists():
-            # We don't have any intermediate(s) for this value, so return empty.
-            return vol[0]
-        elif path.is_dir():
-            pool.starmap(increment_volume, [(i, vol, 0, False) for i in path.glob("**/*.tif*")])
-        else:
-            with TiffFile(path) as tif:
-                offset_set = range(0, len(tif.series), 4)
-                pool.starmap(increment_volume, [(path, vol, i, False) for i in offset_set])
+    vol = np.zeros((2, np.prod((shape.Y, shape.X))), dtype=np.float32)
+    if path.is_dir():
+        with ThreadPool(thread_count) as pool:
+            pool.starmap(increment_volume, [(i, vol, 0, True) for i in path.glob("**/*.tif*")])
+    elif path.exists():
+        increment_volume(path, vol, 0, True)
 
-    nz = np.flatnonzero(vol[-1])
-    vol[:-1, nz] /= vol[-1, nz]
-    return vol[:-1]
+    nz = np.flatnonzero(vol[0])
+    vol[0, nz] /= vol[1, nz]
+    return vol[0]
 
 
 def write_conv_vol(writer: callable, source_path, shape, dtype, scaling, target_folder, index, interpolation):
     perf = {}
-    start = time.perf_counter()
+    vol_start = time.perf_counter()
     vol = volume_from_intermediates(source_path, shape)
-    perf["Merge Volume"] = time.perf_counter() - start
+    perf["Merge Volume"] = time.perf_counter() - vol_start
 
     if scaling is not None:
         start = time.perf_counter()
         # CV2 is only 2D but we're resizing from the 1D image anyway at the moment.
@@ -241,4 +241,5 @@ def write_conv_vol(writer: callable, source_path, shape, dtype, scaling, target_
     writer(target_folder.joinpath(f"{index}.tif"),
            data=np_convert(dtype, vol.T.reshape(shape.Y, shape.X, shape.C), normalize=False, safe_bool=True))
     perf["Write Merged"] = time.perf_counter() - start
+    perf["Total Chunk Merge"] = time.perf_counter() - vol_start
     return perf
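Editor's note on the files.py hunk above: the TIFF-based intermediates become plain append-only records, each one a six-value uint32 header (source_rows, target_rows, offset_rows, offset_columns, channel_count, data_length), three 'S8' dtype codes, and then the raw index/value/weight payloads. Below is a minimal round-trip sketch of that assumed layout; the header numbers, array sizes, and file name are illustrative only, not taken from the code.

import numpy as np

indices = np.arange(10, dtype=np.uint32)              # flat indices into the source grid
values = np.linspace(0.0, 1.0, 10, dtype=np.float32)  # one channel of sample values
weights = np.ones(10, dtype=np.float32)               # accumulation weights

# Header order mirrors load_raw_file_intermediate: rows/offsets, channel count, record length.
header = np.array([20, 60, 40, 60, 1, len(indices)], dtype=np.uint32)
# The three payload dtypes, stored as fixed-width 8-byte strings.
dtype_codes = np.array([indices.dtype.str, values.dtype.str, weights.dtype.str], dtype="S8")

with open("record.dat", "ab") as fh:  # append mode, so several records can share one file
    for part in (header, dtype_codes, indices, values, weights):
        fh.write(part.tobytes())

with open("record.dat", "rb") as fh:
    meta = np.fromfile(fh, np.uint32, 6)
    t_idx, t_val, t_wgt = [np.dtype(c.decode()).type for c in np.fromfile(fh, "S8", 3)]
    count, channels = int(meta[5]), int(meta[4])
    read_indices = np.fromfile(fh, t_idx, count)
    read_values = np.fromfile(fh, t_val, count * channels).reshape(-1, count)
    read_weights = np.fromfile(fh, t_wgt, count)

assert np.array_equal(read_indices, indices)
assert np.array_equal(read_values[0], values)
assert np.array_equal(read_weights, weights)

Because records are appended, a single file can hold many of them back to back, which is what the new increment_volume consumes with its while handle.tell() < end loop.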
diff --git a/python/ouroboros/helpers/parse.py b/python/ouroboros/helpers/parse.py
index c05b210..01b2173 100644
--- a/python/ouroboros/helpers/parse.py
+++ b/python/ouroboros/helpers/parse.py
@@ -11,10 +11,10 @@ class CV_FORMAT(Enum):
     PRECOMPUTED = ["neuroglancer-precomputed"]
     ZARR = ["zarr", "zarr2", "zarr3"]
     N5 = ["n5"]
-    
+
     def __str__(self):
         return f"{self.name.lower()}://"
-    
+
     @classmethod
     def get(cls, suffix):
         for e in cls:
@@ -56,8 +56,8 @@ def parse_source(cls, value, handler: ValidatorFunctionWrapHandler) -> str:
     split_source = base_source.split("|")
     if len(split_source) > 1:
         kv_store = split_source[1].split(":")
-        base_source = f"{CV_FORMAT.get(kv_store[0])}{split_source[0]}{kv_store[1]}"    
-    
+        base_source = f"{CV_FORMAT.get(kv_store[0])}{split_source[0]}{kv_store[1]}"
+
     return SourceModel(url=base_source) if isinstance(source, SourceModel) else base_source
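Editor's note on the parse_source context above: a pipe-delimited source of the form "path|format:suffix" is rebuilt as "format://path" followed by the suffix (CV_FORMAT.__str__ supplies the "format://" prefix). A small self-contained sketch of just that string handling; the build_source helper and the example value are made up for illustration.

def build_source(base_source: str) -> str:
    # Mirrors the splitting in parse_source: "path|format:suffix" -> "format://path" + suffix.
    split_source = base_source.split("|")
    if len(split_source) > 1:
        kv_store = split_source[1].split(":")
        return f"{kv_store[0]}://{split_source[0]}{kv_store[1]}"
    return base_source

assert build_source("bucket/volume.zarr|zarr:/scale0") == "zarr://bucket/volume.zarr/scale0"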
diff --git a/python/ouroboros/helpers/shapes.py b/python/ouroboros/helpers/shapes.py
index eacd73b..3f72280 100644
--- a/python/ouroboros/helpers/shapes.py
+++ b/python/ouroboros/helpers/shapes.py
@@ -2,7 +2,6 @@
 Module containing shapes of data.
 """
 from abc import ABC, abstractmethod
-from collections import namedtuple
 from dataclasses import dataclass, asdict, replace, fields, astuple, make_dataclass, Field, InitVar
 from functools import cached_property, reduce
 import operator
@@ -233,10 +232,6 @@ class Z(DataShape): Z: int # noqa: E701,E702
 class GenericOrder(DataShape): A: int; B: int; C: int # noqa: E701,E702
 
-# ????
-NPString = namedtuple("NPString", 'T')
-
-
 @dataclass
 class DataRange(object):
     start: DataShape; stop: DataShape; step: DataShape # noqa: E701,E702
diff --git a/python/ouroboros/helpers/slice.py b/python/ouroboros/helpers/slice.py
index 1f9845e..e2a9400 100644
--- a/python/ouroboros/helpers/slice.py
+++ b/python/ouroboros/helpers/slice.py
@@ -219,7 +219,7 @@ def backproject_box(bounding_box: BoundingBox, slice_rects: np.ndarray, slices:
     np.add.at(volume[-1], points + point_inc, c_weights)
 
     # Get indicies of the flattened Z-Y-X backprojected domain that have values.
-    nz_vol = np.flatnonzero(volume[-1])
+    nz_vol = np.flatnonzero(volume[-1]).astype(squish_type)
 
     # Return indicies and only the volume region with values.
     return nz_vol, volume[:-1, nz_vol].squeeze(), volume[-1, nz_vol].squeeze()
diff --git a/python/ouroboros/pipeline/backproject_pipeline.py b/python/ouroboros/pipeline/backproject_pipeline.py
index 2c7283b..5cef5be 100644
--- a/python/ouroboros/pipeline/backproject_pipeline.py
+++ b/python/ouroboros/pipeline/backproject_pipeline.py
@@ -11,6 +11,7 @@
 import time
 import traceback
 
+from filelock import FileLock
 import numpy as np
 
 from ouroboros.helpers.memory_usage import (
@@ -32,7 +33,7 @@
     join_path,
     generate_tiff_write,
     write_conv_vol,
-    write_small_intermediate
+    write_raw_intermediate
 )
 
 from ouroboros.helpers.shapes import DataRange, ImgSliceC
@@ -127,19 +128,16 @@ def _process(self, input_data: any) -> tuple[any, None] | tuple[None, any]:
         straightened_volume_path = new_straightened_volume_path
 
-        # Write huge temp files (need to address)
         full_bounding_box = BoundingBox.bound_boxes(volume_cache.bounding_boxes)
         write_shape = np.flip(full_bounding_box.get_shape()).tolist()
-        print(f"\nFront Projection Shape: {FPShape}")
-        print(f"\nBack Projection Shape (Z/Y/X):{write_shape}")
 
         pipeline_input.output_file_path = (f"{config.output_file_name}_"
                                            f"{'_'.join(map(str, full_bounding_box.get_min(np.uint32)))}")
         folder_path = Path(config.output_file_folder, pipeline_input.output_file_path)
         folder_path.mkdir(exist_ok=True, parents=True)
-        i_path = Path(config.output_file_folder,
-                      f"{config.output_file_name}_t_{'_'.join(map(str, full_bounding_box.get_min(np.uint32)))}")
+        # Intermediate Path
+        i_path = Path(config.output_file_folder, f"{os.getpid()}_{config.output_file_name}")
 
         if config.make_single_file:
             is_big_tiff = calculate_gigabytes_from_dimensions(
@@ -189,12 +187,12 @@ def _process(self, input_data: any) -> tuple[any, None] | tuple[None, any]:
             for chunk, _, chunk_rects, _, index in chunk_range.get_iter(chunk_iter):
                 bp_futures.append(executor.submit(
                     process_chunk,
-                    config,
-                    straightened_volume_path,
-                    chunk_rects,
-                    chunk,
-                    index,
-                    full_bounding_box
+                    config=config,
+                    straightened_volume_path=straightened_volume_path,
+                    chunk_rects=chunk_rects,
+                    chunk=chunk,
+                    index=index,
+                    full_bounding_box=full_bounding_box
                 ))
 
             # Track what's written.
@@ -206,8 +204,8 @@ def _process(self, input_data: any) -> tuple[any, None] | tuple[None, any]:
             def note_written(write_future):
                 nonlocal pages_written
                 pages_written += 1
-                self.update_progress((np.sum(processed) / len(chunk_range)) * (2 / 3)
-                                     + (pages_written / num_pages) * (1 / 3))
+                self.update_progress((np.sum(processed) / len(chunk_range)) * (exec_procs / self.num_processes)
+                                     + (pages_written / num_pages) * (write_procs / self.num_processes))
                 for key, value in write_future.result().items():
                     self.add_timing(key, value)
@@ -222,8 +220,8 @@ def note_written(write_future):
                 # Update the progress bar
                 processed[index] = 1
-                self.update_progress((np.sum(processed) / len(chunk_range)) * (2 / 3)
-                                     + (pages_written / num_pages) * (1 / 3))
+                self.update_progress((np.sum(processed) / len(chunk_range)) * (exec_procs / self.num_processes)
+                                     + (pages_written / num_pages) * (write_procs / self.num_processes))
 
                 update_writable_rects(processed, slice_rects, min_dim, writeable, DEFAULT_CHUNK_SIZE)
@@ -233,14 +231,14 @@ def note_written(write_future):
                 for index in write:
                     write_futures.append(write_executor.submit(
                         write_conv_vol,
-                        tif_write(tifffile.imwrite),
-                        i_path.joinpath(f"i_{index:05}"),
-                        ImgSliceC(*write_shape[1:], channels),
-                        bool if config.make_backprojection_binary else np.uint16,
-                        scaling_factors,
-                        folder_path,
-                        index,
-                        config.upsample_order
+                        writer=tif_write(tifffile.imwrite),
+                        source_path=i_path.joinpath(f"i_{index:05}.dat"),
+                        shape=ImgSliceC(*write_shape[1:], channels),
+                        dtype=bool if config.make_backprojection_binary else np.uint16,
+                        scaling=scaling_factors,
+                        target_folder=folder_path,
+                        index=index,
+                        interpolation=config.upsample_order
                     ))
                     write_futures[-1].add_done_callback(note_written)
@@ -271,8 +269,7 @@ def note_written(write_future):
         if config.make_single_file:
             shutil.rmtree(folder_path)
-            shutil.rmtree(Path(config.output_file_folder,
-                          f"{config.output_file_name}_t_{'_'.join(map(str, full_bounding_box.get_min(np.uint32)))}"))
+            shutil.rmtree(i_path)
 
         return None
@@ -320,7 +317,7 @@ def process_chunk(
         if values.nbytes == 0:
             # No data to write from this chunk, so return as such.
-            durations["total_process"] = [time.perf_counter() - start_total]
+            durations["total_chunk_process"] = [time.perf_counter() - start_total]
             return durations, index, []
 
         # Save the data
@@ -336,7 +333,9 @@ def process_chunk(
             "target_rows": full_bounding_box.get_shape()[0],
             "offset_columns": offset[1],
             "offset_rows": offset[2],
+            "channel_count": np.uint32(1 if len(slices.shape) < 4 else slices.shape[-1]),
         }
+        type_ar = np.array([yx_vals.dtype.str, values.dtype.str, weights.dtype.str], dtype='S8')
         durations["split"] = [time.perf_counter() - start]
 
         # Gets slices off full array corresponding to each Z value.
@@ -347,19 +346,24 @@ def process_chunk(
         durations["stack"] = [time.perf_counter() - start]
 
         start = time.perf_counter()
-        file_path = Path(config.output_file_folder,
-                         f"{config.output_file_name}_t_{'_'.join(map(str, full_bounding_box.get_min(np.uint32)))}")
-        file_path.mkdir(exist_ok=True, parents=True)
+        i_path = Path(config.output_file_folder, f"{os.getppid()}_{config.output_file_name}")
+        i_path.mkdir(exist_ok=True, parents=True)
 
-        def write_z(i, z_slice):
+        def write_z(target, z_slice):
+            write_raw_intermediate(target,
+                                   np.fromiter(offset_dict.values(), dtype=np.uint32, count=5).tobytes(),
+                                   np.uint32(len(yx_vals[z_slice])).tobytes(),
+                                   type_ar.tobytes(),
+                                   yx_vals[z_slice].tobytes(), values[z_slice].tobytes(), weights[z_slice].tobytes())
+
+        def make_z(i, z_slice):
             offset_z = z_stack[i] + offset[0]
-            file_path.joinpath(f"i_{offset_z:05}").mkdir(exist_ok=True, parents=True)
-            write_small_intermediate(file_path.joinpath(f"i_{offset_z:05}", f"{index}.tif"),
-                                     np.fromiter(offset_dict.values(), dtype=np.uint32, count=4),
-                                     yx_vals[z_slice], np.atleast_2d(values)[:, z_slice], weights[z_slice])
+            z_path = i_path.joinpath(f"i_{offset_z:05}.dat")
+            with FileLock(z_path.with_suffix(".lock")):
+                write_z(open(z_path, "ab"), z_slice)
 
         with ThreadPool(12) as pool:
-            pool.starmap(write_z, enumerate(z_slices))
+            pool.starmap(make_z, enumerate(z_slices))
 
         durations["write_intermediate"] = [time.perf_counter() - start]
 
     except BaseException as be:
@@ -367,7 +371,7 @@ def write_z(i, z_slice):
         traceback.print_tb(be.__traceback__, file=sys.stderr)
         raise be
 
-    durations["total_process"] = [time.perf_counter() - start_total]
+    durations["total_chunk_process"] = [time.perf_counter() - start_total]
 
     return durations, index, z_stack + offset[0]
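Editor's note on the process_chunk hunk above: chunk workers now append records for a given Z index into one shared .dat file, serialised through a sibling .lock file from the filelock package. A minimal sketch of that append-under-lock pattern, assuming only the filelock dependency added in this change; the path and payload are illustrative.

from pathlib import Path

import numpy as np
from filelock import FileLock


def append_record(z_path: Path, payload: np.ndarray) -> None:
    # Acquire the sibling lock before touching the shared data file, so concurrent
    # workers append whole records rather than interleaved bytes.
    with FileLock(str(z_path.with_suffix(".lock"))):
        with open(z_path, "ab") as handle:
            handle.write(payload.tobytes())


append_record(Path("i_00042.dat"), np.arange(8, dtype=np.uint32))

Keeping each record contiguous under the lock is what lets load_raw_file_intermediate walk the merged file record by record later on.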
diff --git a/python/poetry.lock b/python/poetry.lock
index e4329e9..489bcc2 100644
--- a/python/poetry.lock
+++ b/python/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand.
 
 [[package]]
 name = "altgraph"
@@ -1518,6 +1518,18 @@ files = [
 [package.dependencies]
 numpy = "*"
 
+[[package]]
+name = "filelock"
+version = "3.20.0"
+description = "A platform independent file lock."
+optional = false
+python-versions = ">=3.10"
+groups = ["main"]
+files = [
+    {file = "filelock-3.20.0-py3-none-any.whl", hash = "sha256:339b4732ffda5cd79b13f4e2711a31b0365ce445d95d243bb996273d072546a2"},
+    {file = "filelock-3.20.0.tar.gz", hash = "sha256:711e943b4ec6be42e1d4e6690b48dc175c822967466bb31c0c293f34334c13f4"},
+]
+
 [[package]]
 name = "fonttools"
 version = "4.60.1"
@@ -4572,43 +4584,29 @@ groups = ["main"]
 files = [
     {file = "zfpy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1417ec8595c2ccb467d228410f7570b1e804bf042ffe84664d7851a267e03239"},
     {file = "zfpy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:893aa8239a9cbae34f8277c6633c145f2dc11230f0f51a39c010a594a4581976"},
-    {file = "zfpy-1.0.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55981215b0e4c447c3e715190d5c27e1f67af137088aff35fccee8bdb52a0eed"},
-    {file = "zfpy-1.0.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4feea60d3bca2ae48456e2b38831d8715694072266bd4642ff738fb7e5512edc"},
     {file = "zfpy-1.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910cac6c191f7bf9d100bd348486d2e53a35a2ba4aa7091020f7154b9f26454a"},
     {file = "zfpy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b15012ed906b89b43474d5b1400a955348c6bc9cc4c216c2049c16c249a9b51f"},
     {file = "zfpy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:a71c0f8c4bfba2433ad11332d6b8493ae661a1c01048292f5e539103a605ec2d"},
     {file = "zfpy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:13146293a5132627feb2c9f7adc6d95c528dd19dc3260f000e4b6284b2696b1f"},
     {file = "zfpy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:06d6d6a46fbd198c6fec7f31b111e11f5be3a254859f5ac3ab9488d05b240337"},
-    {file = "zfpy-1.0.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96c8d3702ec6fdb40cd40c8c64c9e47fe7ba07ca63a0392d051fb109e44a85b0"},
-    {file = "zfpy-1.0.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e0ad9b75ef5135bcd8ba8d0851dda828c6d183c5cd73577782a794387c001ec1"},
     {file = "zfpy-1.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d204b17ee8c1eef6a2e650c123d627c0f64b3d8957df4c788dd2aaf2a72eb4ae"},
     {file = "zfpy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0ea9d0bcf191ec345145592d2e7586413bcfd385034f85d00238e1eaa857185"},
     {file = "zfpy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:78ff5cc656da9414e264bbdfb316a0c30bf930a426f9c2a1f1a5131620135439"},
     {file = "zfpy-1.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bb83fa31c943475c4ed8182c0bd1db518beca319809612c9851d78545e7d31c9"},
     {file = "zfpy-1.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d401da303fe926b18df1e93483a2099762d61db87597ccdaf89e25fb71b107cc"},
-    {file = "zfpy-1.0.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c5289c100e9820723b1f78616a0e37ab32416c69b0c934e96f2b8d9138f54d5"},
-    {file = "zfpy-1.0.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:33e9505c7457937912ed4a2356d802ba1898fe32d636e24f21c7072d03dc68fb"},
     {file = "zfpy-1.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b5e7c1f0ebdecc6850e65004e7fbba8d5e78bf4b626654e463c3e0aa5b3fe54"},
     {file = "zfpy-1.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58180c0917656ab0eb2dadad7e033e10271a3c615dc8972657137dba16e2bd42"},
     {file = "zfpy-1.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:f15c91425cebca0ab5c88b3b2ba0eb800ec5310e3ea2a6b5f3e4b198a97288d9"},
     {file = "zfpy-1.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:791fa42b651a26be67e52043b619e3a039862900e7080711d07a5de956158b8c"},
     {file = "zfpy-1.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f811480ebff2e028c50bf3491d1d2e3869eb4388883543d0698f5fdc917e4386"},
-    {file = "zfpy-1.0.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:665d3f55665a5b53642842f751f2dc1e7ec5a1307030f88b8137e63bb93c9e24"},
-    {file = "zfpy-1.0.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7192425d69887a0759aa3f494f942fe5da7fd32debc1ec5ed44bbd754d8b486a"},
     {file = "zfpy-1.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4892bc441ce59de98c4b3b29f04e44657aa6ccfe3ae5ca23660efb010b829d6"},
     {file = "zfpy-1.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a64590ef3c182988f4ac5745ec6e1623b06c348583cbdb96d296e4899f4469"},
     {file = "zfpy-1.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:f9a71cfce2fb7a82497e90e7152f847250664f30ca4dfedcca500c35d4c63e44"},
-    {file = "zfpy-1.0.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ba62488da318a1e2a7497164823ac0899993560e41fbdfb0381b0f011b317f26"},
-    {file = "zfpy-1.0.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:838926f46c6601bce70068b711d630f74cde89ab33d92851bfaa9ed22ae06009"},
-    {file = "zfpy-1.0.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78dfd1ce01fc79e068be69a7e883203cf47f13806e6f65d66cb30e2b2a4bcb13"},
-    {file = "zfpy-1.0.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:802633376e86ed19bb07cd8518dd445a81b81f89ef9ac2147f4429a95cd0a69f"},
-    {file = "zfpy-1.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:5448056a32da0f5db4dd3a80b008d6d31a06ba1a6d44bf5cf3a112a238ca30ac"},
     {file = "zfpy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:46ec529b729b553604a3509cd0d6d0c28169f0be0cd7744f69f16324912d463b"},
     {file = "zfpy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b717a14b3ae18fc0130d440706dc08cde9c8149423bb0df9f478f52a07a5ed6"},
     {file = "zfpy-1.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf82232d4f6e0092f54a7bf2ec52ae70024f0a2839ce54b77fb2a29fb417791e"},
     {file = "zfpy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c86a576d65557f83e2c758664ee96710c06352f8b79dca5867199e94ccfb2ce"},
     {file = "zfpy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:2f0dea9281b6ce1623eb45316f000a0fdaad01459a01bc50dd866a5fff37ec7b"},
-    {file = "zfpy-1.0.1.tar.gz", hash = "sha256:75c7014bdb2ad497a08846aaadca6d13de7c154a541c42557e52ec42030ca926"},
 ]
 
 [[package]]
@@ -4812,4 +4810,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.11,<3.13"
-content-hash = "6befa102e0ee25da5d46ca77f34981dd8cd12e5bfb86c61d35377a30b4d314fa"
+content-hash = "1bf3f1761d13853aa420300e55e88f9778a98fd0b0c65a334c074770513ea13c"
diff --git a/python/pyproject.toml b/python/pyproject.toml
index 48a21f0..f609da6 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -29,6 +29,7 @@ sse-starlette = "^2.3.2"
 typing-extensions = "^4.14.0"
 uvicorn = "^0.29.0"
 opencv-python = "^4.0.0"
+filelock = "^3.20.0"
 
 [tool.poetry.group.dev.dependencies]
 pytest = "^8.2.2"
diff --git a/python/test/helpers/test_files.py b/python/test/helpers/test_files.py
index 340c7ad..872381c 100644
--- a/python/test/helpers/test_files.py
+++ b/python/test/helpers/test_files.py
@@ -20,9 +20,9 @@
     np_convert,
     generate_tiff_write,
     ravel_map_2d,
-    load_z_intermediate,
     increment_volume,
-    write_small_intermediate
+    load_raw_file_intermediate,
+    write_raw_intermediate
 )
@@ -223,24 +223,28 @@ def test_write_intermediate(tmp_path):
         "target_rows": np.uint32(60),
         "offset_columns": np.uint32(60),
         "offset_rows": np.uint32(40),
+        "channels": np.uint32(1)
     }
+    type_ar = np.array([raveled_source.dtype.str, source_values.dtype.str, source_weights.dtype.str], dtype='S8')
 
-    write_small_intermediate(sample_path,
-                             np.fromiter(offset_dict.values(), dtype=np.uint32, count=4),
-                             raveled_source,
-                             source_values,
-                             source_weights)
+    write_raw_intermediate(open(sample_path, "wb"),
+                           np.fromiter(offset_dict.values(), dtype=np.uint32, count=5).tobytes(),
+                           np.uint32(len(raveled_source)).tobytes(),
+                           type_ar.tobytes(),
+                           raveled_source.tobytes(),
+                           source_values.tobytes(),
+                           source_weights.tobytes())
 
-    indicies, values, weights = load_z_intermediate(sample_path)
+    indicies, values, weights = load_raw_file_intermediate(open(sample_path, "rb"))
 
     assert len(indicies) == 100
-    assert np.all(indicies == raveled_mapped)
-    assert len(values) == 100
+    assert values.shape == (1, 100)
     assert values.dtype == np.float32
-    assert np.all(values == source_values)
     assert len(weights) == 100
-    assert np.all(weights == source_weights)
     assert weights.dtype == np.float32
+    assert np.all(values[0] == source_values)
+    assert np.all(weights == source_weights)
+    assert np.all(indicies == raveled_mapped)
 
 
 def test_increment_volume(tmp_path):
@@ -258,13 +262,17 @@ def test_increment_volume(tmp_path):
         "target_rows": np.uint32(60),
         "offset_columns": np.uint32(60),
         "offset_rows": np.uint32(40),
+        "channels": np.uint32(1)
     }
-
-    write_small_intermediate(sample_path,
-                             np.fromiter(offset_dict.values(), dtype=np.uint32, count=4),
-                             raveled_source,
-                             source_values,
-                             source_weights)
+    type_ar = np.array([raveled_source.dtype.str, source_values.dtype.str, source_weights.dtype.str], dtype='S8')
+
+    write_raw_intermediate(open(sample_path, "wb"),
+                           np.fromiter(offset_dict.values(), dtype=np.uint32, count=5).tobytes(),
+                           np.uint32(len(raveled_source)).tobytes(),
+                           type_ar.tobytes(),
+                           raveled_source.tobytes(),
+                           source_values.tobytes(),
+                           source_weights.tobytes())
 
     volume = np.zeros((2, 80 * 60))
     increment_volume(sample_path, volume[:], cleanup=True)