diff --git a/documentation/docs/guide/backproject.md b/documentation/docs/guide/backproject.md
index c0f7099..08d2ecd 100644
--- a/documentation/docs/guide/backproject.md
+++ b/documentation/docs/guide/backproject.md
@@ -38,7 +38,7 @@ The offset of the minimum bounding box is stored in the output tiff's description
 - 📁 `Slice Options File` - Path to the `-slice-options.json` file which includes the information needed for backprojection.
 - 📁 `Output File Folder` - The folder to save all the resulting files into.
 - `Output File Name` - Base name for all output files.
-- `Output MIP Level` - The MIP level to output the backprojection in (essentially an upsample option). Use this if you downsampled in the slicing step.
+- `Output MIP Level` - The MIP level to output the backprojection in (essentially an upsample option). Use this if you downsampled in the slicing step; MIP levels that would downsample are currently ignored.
 - `Upsample Order` - The interpolation order Ouroboros uses to interpolate values from a lower MIP level. If you check the binary option, feel free to set this to 0.
 - `Backprojection Compression` - The compression option to use for the backprojected tiff(s). Recommended options: `none`, `zlib`, `zstd`.
 - `Output Single File` - Whether to output one tiff stack file or a folder of files.
diff --git a/python/ouroboros/helpers/files.py b/python/ouroboros/helpers/files.py
index 0060797..b2e33bf 100644
--- a/python/ouroboros/helpers/files.py
+++ b/python/ouroboros/helpers/files.py
@@ -5,6 +5,7 @@ import numpy as np
 from numpy.typing import ArrayLike
 from pathlib import Path
+import cv2
 from tifffile import TiffWriter, TiffFile
 import time
@@ -218,12 +219,25 @@ def volume_from_intermediates(path: Path, shape: DataShape, thread_count: int =
     return vol[0]


-def write_conv_vol(writer: callable, source_path, shape, dtype, *args, **kwargs):
+def write_conv_vol(writer: callable, source_path, shape, dtype, scaling, target_folder, index, interpolation):
     perf = {}
     start = time.perf_counter()
     vol = volume_from_intermediates(source_path, shape)
     perf["Merge Volume"] = time.perf_counter() - start
-    start = time.perf_counter()
-    writer(*args, data=np_convert(dtype, vol.reshape(shape.Y, shape.X), normalize=False, safe_bool=True), **kwargs)
-    perf["Write Merged"] = time.perf_counter() - start
+    if scaling is not None:
+        start = time.perf_counter()
+        # cv2.resize is 2D-only, but we're resizing a single merged 2D slice at a time here anyway.
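+        # `scaling` is ordered (Z, Y, X) to match the volume shape, so the X factor is
+        # scaling[2] (cv2's fx, columns) and the Y factor is scaling[1] (fy, rows).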
+        new_volume = cv2.resize(
+            np_convert(dtype, vol.reshape(shape.Y, shape.X), normalize=False, safe_bool=True),
+            None, fx=scaling[2], fy=scaling[1], interpolation=interpolation)
+        perf["Zoom"] = time.perf_counter() - start
+        start = time.perf_counter()
+        # Upsample Z by replication: write the resized slice round(scaling[0]) times.
+        for i in range(round(scaling[0])):
+            writer(target_folder.joinpath(f"{(index * round(scaling[0]) + i):05}.tif"), data=new_volume)
+        perf["Write Merged"] = time.perf_counter() - start
+    else:
+        start = time.perf_counter()
+        writer(target_folder.joinpath(f"{index:05}.tif"),
+               data=np_convert(dtype, vol.reshape(shape.Y, shape.X), normalize=False, safe_bool=True))
+        perf["Write Merged"] = time.perf_counter() - start

     return perf
diff --git a/python/ouroboros/pipeline/backproject_pipeline.py b/python/ouroboros/pipeline/backproject_pipeline.py
index baab382..b011154 100644
--- a/python/ouroboros/pipeline/backproject_pipeline.py
+++ b/python/ouroboros/pipeline/backproject_pipeline.py
@@ -12,16 +12,12 @@ import traceback

 import numpy as np
-import scipy
-import scipy.ndimage

 from ouroboros.helpers.memory_usage import (
-    calculate_chunk_size,
     calculate_gigabytes_from_dimensions
 )
 from ouroboros.helpers.slice import (  # noqa: F401
     detect_color_channels_shape,
-    make_volume_binary,
     FrontProjStack,
     backproject_box,
     BackProjectIter
@@ -32,11 +28,8 @@ from ouroboros.helpers.options import BackprojectOptions
 from ouroboros.helpers.files import (
     format_backproject_resave_volume,
-    format_tiff_name,
     get_sorted_tif_files,
     join_path,
-    num_digits_for_n_files,
-    parse_tiff_name,
     generate_tiff_write,
     write_conv_vol,
     write_small_intermediate
@@ -163,10 +156,26 @@ def _process(self, input_data: any) -> tuple[any, None] | tuple[None, any]:
             micron_resolution=volume_cache.get_resolution_um(),
             backprojection_offset=bp_offset)

+        if pipeline_input.slice_options.output_mip_level != config.output_mip_level:
+            scaling_factors, _ = calculate_scaling_factors(
+                pipeline_input.source_url,
+                pipeline_input.slice_options.output_mip_level,
+                config.output_mip_level,
+                write_shape
+            )
+        else:
+            scaling_factors = None
+        print(f"SF: {scaling_factors}")
+
+        # Allocate procs equally between BP math and writing if we're rescaling, otherwise 3:1 favoring
+        # the BP calculation.
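+        # e.g. num_processes=8 -> 4 BP / 4 write procs when rescaling, 6 BP / 2 write otherwise.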
+        exec_procs = self.num_processes // 4 * (2 if scaling_factors is not None else 3)
+        write_procs = self.num_processes // 4 * (2 if scaling_factors is not None else 1)
+
         # Process each bounding box in parallel, writing the results to the backprojected volume
         try:
-            with (concurrent.futures.ProcessPoolExecutor((self.num_processes // 4) * 3) as executor,
-                  concurrent.futures.ProcessPoolExecutor(self.num_processes // 4) as write_executor):
+            with (concurrent.futures.ProcessPoolExecutor(exec_procs) as executor,
+                  concurrent.futures.ProcessPoolExecutor(write_procs) as write_executor):

                 bp_futures = []
                 write_futures = []
@@ -222,10 +231,14 @@ def note_written(write_future):
                     for index in write:
                         write_futures.append(write_executor.submit(
                             write_conv_vol,
-                            tif_write(tifffile.imwrite), i_path.joinpath(f"i_{index:05}"),
+                            tif_write(tifffile.imwrite),
+                            i_path.joinpath(f"i_{index:05}"),
                             ImgSlice(*write_shape[1:]),
                             bool if config.make_backprojection_binary else np.uint16,
-                            folder_path.joinpath(f"{index:05}.tif")
+                            scaling_factors,
+                            folder_path,
+                            index,
+                            config.upsample_order
                         ))
                         write_futures[-1].add_done_callback(note_written)
@@ -249,38 +262,6 @@ def note_written(write_future):
             for fname in get_sorted_tif_files(folder_path):
                 writer(tifffile.imread(folder_path.joinpath(fname)))

-        # Rescale the backprojected volume to the output mip level
-        if pipeline_input.slice_options.output_mip_level != config.output_mip_level:
-            output_name = f"{folder_path}-temp"
-
-            error = rescale_mip_volume(
-                pipeline_input.source_url,
-                pipeline_input.slice_options.output_mip_level,
-                config.output_mip_level,
-                single_path=(None if config.make_single_file is False else folder_path + ".tif"),
-                folder_path=(folder_path if config.make_single_file is False else None),
-                output_name=output_name,
-                compression=config.backprojection_compression,
-                max_ram_gb=config.max_ram_gb,
-                order=config.upsample_order,
-                binary=config.make_backprojection_binary,
-            )
-
-            if error is not None:
-                return error
-
-            # Remove the original backprojected volume
-            if config.make_single_file:
-                os.remove(folder_path + ".tif")
-            else:
-                shutil.rmtree(folder_path)
-
-            # Rename the rescaled volume
-            if config.make_single_file:
-                os.rename(output_name + ".tif", folder_path + ".tif")
-            else:
-                os.rename(output_name, folder_path)
-
         # Update the pipeline input with the output file path
         pipeline_input.backprojected_folder_path = folder_path
@@ -288,6 +269,8 @@

         if config.make_single_file:
             shutil.rmtree(folder_path)
+            shutil.rmtree(Path(config.output_file_folder,
+                               f"{config.output_file_name}_t_{'_'.join(map(str, full_bounding_box.get_min(np.uint32)))}"))

         return None
@@ -387,225 +370,6 @@ def write_z(i, z_slice):
     return durations, index, z_stack + offset[0]


-def rescale_mip_volume(
-    source_url: str,
-    current_mip: int,
-    target_mip: int,
-    single_path: str = None,
-    folder_path: str = None,
-    output_name: str = "out",
-    compression: str = None,
-    max_ram_gb: int = 0,
-    order: int = 2,
-    binary: bool = False,
-) -> str | None:
-    """
-    Rescale the volume to the mip level.
-
-    Parameters
-    ----------
-    source_url : str
-        The URL of the volume.
-    current_mip : int
-        The current mip level of the volume.
-    target_mip : int
-        The target mip level of the volume.
-    single_path : str
-        The path to the single tif file.
-    folder_path : str
-        The path to the folder containing the tif files.
-    output_name : str
-        The path to the output.
-    compression : str, optional
-        The compression to use for the resulting tif file.
-        The default is None.
-    max_ram_gb : int, optional
-        The maximum amount of RAM to use in GB.
-        The default is 0.
-    order : int, optional
-        The order of the interpolation.
-        The default is 2.
-    binary : bool, optional
-        Whether to make the backprojected volume binary.
-
-    Returns
-    -------
-    str | None
-        Error message if an error occurred.
-    """
-
-    if single_path is None and folder_path is None:
-        return "Either single_path or folder_path must be provided."
-
-    if target_mip == current_mip:
-        return None
-
-    if single_path is not None:
-        return rescale_single_tif(
-            source_url,
-            current_mip,
-            target_mip,
-            single_path,
-            compression=compression,
-            file_name=output_name + ".tif",
-            max_ram_gb=max_ram_gb,
-            order=order,
-            binary=binary,
-        )
-
-    return rescale_folder_tif(
-        source_url,
-        current_mip,
-        target_mip,
-        folder_path,
-        compression=compression,
-        folder_name=output_name,
-        max_ram_gb=max_ram_gb,
-        order=order,
-        binary=binary,
-    )
-
-
-def rescale_single_tif(
-    source_url: str,
-    current_mip: int,
-    target_mip: int,
-    single_path: str,
-    file_name: str = "out.tif",
-    compression: str = None,
-    max_ram_gb: int = 0,
-    order: int = 1,
-    binary: bool = False,
-) -> str | None:
-    with tifffile.TiffFile(single_path) as tif:
-        tif_shape = (len(tif.pages),) + tif.pages[0].shape
-
-        scaling_factors, _ = calculate_scaling_factors(
-            source_url, current_mip, target_mip, tif_shape
-        )
-
-        # Calculate the output tiff shape
-        output_shape = tuple(
-            int(tif_shape[i] * scaling_factors[i]) for i in range(len(tif_shape))
-        )
-
-        # Note: The chunk size is divided by the scaling factor to account for the
-        # number of slices that need to be loaded to produce chunk_size slices in the output volume
-        chunk_size = max(
-            int(
-                calculate_chunk_size(
-                    output_shape, tif.pages[0].dtype, max_ram_gb=max_ram_gb
-                )
-                / scaling_factors[0]
-            ),
-            1,
-        )
-
-        with tifffile.TiffWriter(file_name) as output_volume:
-            for i in range(0, tif_shape[0], chunk_size):
-                # Stack the tif layers along the first axis (chunk_size)
-                tif_layer = np.array(
-                    [
-                        tif.pages[j].asarray()
-                        for j in range(i, min(i + chunk_size, tif_shape[0]))
-                    ]
-                )
-
-                layers = scipy.ndimage.zoom(tif_layer, scaling_factors, order=order)
-
-                if binary:
-                    layers = make_volume_binary(layers)
-
-                size = layers.shape[0]
-
-                # Save the layers to the tif file
-                for j in range(size):
-                    output_volume.write(
-                        layers[j],
-                        contiguous=compression is None or compression == "none",
-                        compression=compression,
-                        software="ouroboros",
-                    )
-
-    return None
-
-
-def rescale_folder_tif(
-    source_url: str,
-    current_mip: int,
-    target_mip: int,
-    folder_path: str,
-    folder_name: str = "out",
-    compression: str = None,
-    max_ram_gb: int = 0,
-    order: int = 1,
-    binary: bool = False,
-) -> str | None:
-    # Create output folder if it doesn't exist
-    output_folder = folder_name
-    os.makedirs(output_folder, exist_ok=True)
-
-    tifs = get_sorted_tif_files(folder_path)
-
-    if len(tifs) == 0:
-        return "No tif files found in the folder."
-
-    sample_tif = tifffile.imread(join_path(folder_path, tifs[0]))
-
-    # Determine the shape of the tif stack
-    new_shape = (len(tifs), *sample_tif.shape)
-
-    scaling_factors, resolution_factors = calculate_scaling_factors(
-        source_url, current_mip, target_mip, new_shape
-    )
-
-    # Note: The chunk size is divided by the scaling factor to account for the
-    # number of slices that need to be loaded to produce chunk_size slices in the output volume
-    chunk_size = max(
-        int(
-            calculate_chunk_size(new_shape, sample_tif.dtype, max_ram_gb=max_ram_gb)
-            / scaling_factors[0]
-        ),
-        1,
-    )
-
-    num_digits = num_digits_for_n_files(len(tifs))
-
-    first_index = parse_tiff_name(tifs[0])
-
-    output_index = int(first_index * resolution_factors[0])
-
-    # Resize the volume
-    for i in range(0, len(tifs), chunk_size):
-        # Stack the tif layers along the first axis (chunk_size)
-        tif = np.array(
-            [
-                tifffile.imread(join_path(folder_path, tifs[j]))
-                for j in range(i, min(i + chunk_size, len(tifs)))
-            ]
-        )
-
-        layers = scipy.ndimage.zoom(tif, scaling_factors, order=order)
-
-        if binary:
-            layers = make_volume_binary(layers)
-
-        size = layers.shape[0]
-
-        # Write the layers to new tif files
-        for j in range(size):
-            tifffile.imwrite(
-                join_path(output_folder, format_tiff_name(output_index, num_digits)),
-                layers[j],
-                contiguous=True if compression is None else False,
-                compression=compression,
-                software="ouroboros",
-            )
-            output_index += 1
-
-    return None
-
-
 def calculate_scaling_factors(
     source_url: str, current_mip: int, target_mip: int, tif_shape: tuple
 ) -> tuple[tuple, tuple]:
diff --git a/python/poetry.lock b/python/poetry.lock
index ddb0084..d8b7043 100644
--- a/python/poetry.lock
+++ b/python/poetry.lock
@@ -382,7 +382,7 @@ description = "Foreign Function Interface for Python calling C code."
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
-markers = "sys_platform == \"win32\" or platform_python_implementation != \"CPython\""
+markers = "platform_python_implementation != \"CPython\" or sys_platform == \"win32\""
 files = [
     {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"},
     {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"},
@@ -2683,6 +2683,29 @@ files = [
     {file = "objprint-0.3.0.tar.gz", hash = "sha256:b5d83f9d62db5b95353bb42959106e1cd43010dcaa3eed1ad8d7d0b2df9b2d5a"},
 ]

+[[package]]
+name = "opencv-python"
+version = "4.11.0.86"
+description = "Wrapper package for OpenCV python bindings."
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "opencv-python-4.11.0.86.tar.gz", hash = "sha256:03d60ccae62304860d232272e4a4fda93c39d595780cb40b161b310244b736a4"},
+    {file = "opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:432f67c223f1dc2824f5e73cdfcd9db0efc8710647d4e813012195dc9122a52a"},
+    {file = "opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:9d05ef13d23fe97f575153558653e2d6e87103995d54e6a35db3f282fe1f9c66"},
+    {file = "opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b92ae2c8852208817e6776ba1ea0d6b1e0a1b5431e971a2a0ddd2a8cc398202"},
+    {file = "opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b02611523803495003bd87362db3e1d2a0454a6a63025dc6658a9830570aa0d"},
+    {file = "opencv_python-4.11.0.86-cp37-abi3-win32.whl", hash = "sha256:810549cb2a4aedaa84ad9a1c92fbfdfc14090e2749cedf2c1589ad8359aa169b"},
+    {file = "opencv_python-4.11.0.86-cp37-abi3-win_amd64.whl", hash = "sha256:085ad9b77c18853ea66283e98affefe2de8cc4c1f43eda4c100cf9b2721142ec"},
+]
+
+[package.dependencies]
+numpy = [
+    {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
+    {version = ">=1.23.5", markers = "python_version == \"3.11\""},
+]
+
 [[package]]
 name = "orjson"
 version = "3.11.3"
@@ -3296,7 +3319,7 @@ description = "C parser in Python"
 optional = false
 python-versions = ">=3.8"
 groups = ["main"]
-markers = "(sys_platform == \"win32\" or platform_python_implementation != \"CPython\") and implementation_name != \"PyPy\""
+markers = "implementation_name != \"PyPy\" and (platform_python_implementation != \"CPython\" or sys_platform == \"win32\")"
 files = [
     {file = "pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934"},
     {file = "pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2"},
@@ -4811,4 +4834,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt

 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.11,<3.13"
-content-hash = "566fe1b4e2b06fcd2f10383e1391ad3618b89d19f699e32ae1866f8516df4713"
+content-hash = "ba8a314e6c73c093b97bcf55762003e50fe435eea170e90591a56b3892d24f83"
diff --git a/python/pyproject.toml b/python/pyproject.toml
index 24b079a..b660eb2 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -28,6 +28,7 @@ uuid = "^1.30"
 sse-starlette = "^2.3.2"
 typing-extensions = "^4.14.0"
 uvicorn = "^0.29.0"
+opencv-python = "^4.0.0"

 [tool.poetry.group.dev.dependencies]
 pytest = "^8.2.2"
diff --git a/src/renderer/src/interfaces/options.tsx b/src/renderer/src/interfaces/options.tsx
index a0a09fb..b9106e1 100644
--- a/src/renderer/src/interfaces/options.tsx
+++ b/src/renderer/src/interfaces/options.tsx
@@ -287,15 +287,15 @@ export class BackprojectOptionsFile extends CompoundEntry {
                 'Base name for all output files.'
             ),
             new Entry('output_mip_level', 'Output MIP Level', 0, 'number').withDescription(
-                'The MIP level to output the backprojection in (essentially an upsample option). Use this if you downsampled in the slicing step.'
+                'The MIP level to output the backprojection in (essentially an upsample option). Use this if you downsampled in the slicing step. MIP levels that would downsample are currently ignored.'
             ),
             new Entry(
                 'upsample_order',
-                'Upsample Order (2 = Quadratic)',
+                'Upsample Order (2 = Cubic)',
                 2,
                 'number'
             ).withDescription(
-                'The interpolation order Ouroboros uses to interpolate values from a lower MIP level. If you check the binary option, feel free to set this to 0.'
+                'The interpolation order Ouroboros uses to interpolate values from a lower MIP level (matches the OpenCV interpolation parameter). If you check the binary option, feel free to set this to 0.'
             ),
             new Entry(
                 'backprojection_compression',
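
Review note: the sketch below illustrates what the rewritten `write_conv_vol` path does for one merged slice, assuming `scaling` is ordered `(Z, Y, X)` as returned by `calculate_scaling_factors` (the same ordering the removed `scipy.ndimage.zoom` calls relied on). The array contents and the `index` value are made up for illustration.

```python
import cv2
import numpy as np

# Hypothetical inputs: a merged 256x256 slice and 2x upsampling on every axis.
slice_2d = np.zeros((256, 256), dtype=np.uint16)
scaling = (2.0, 2.0, 2.0)  # (Z, Y, X) factors, assumed ordering
index = 7

# cv2.resize scales columns with fx (the X factor) and rows with fy (the Y factor);
# interpolation=2 is cv2.INTER_CUBIC, matching the "Upsample Order (2 = Cubic)" option.
upsampled = cv2.resize(slice_2d, None, fx=scaling[2], fy=scaling[1],
                       interpolation=cv2.INTER_CUBIC)
assert upsampled.shape == (512, 512)

# Z is upsampled by replication: input slice `index` becomes round(scaling[0])
# consecutive output slices (nearest-neighbor along the stack axis).
names = [f"{index * round(scaling[0]) + i:05}.tif" for i in range(round(scaling[0]))]
print(names)  # ['00014.tif', '00015.tif']
```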