33 changes: 20 additions & 13 deletions python/ouroboros/helpers/files.py
@@ -1,13 +1,11 @@
from functools import partial
from multiprocessing.pool import ThreadPool
import os
import shutil
from threading import Thread

import numpy as np
from numpy.typing import ArrayLike
from pathlib import Path
from tifffile import imread, TiffWriter, TiffFile
from tifffile import TiffWriter, TiffFile
import time

from .shapes import DataShape
@@ -123,24 +121,33 @@ def num_digits_for_n_files(n: int) -> int:
return len(str(n - 1))


def np_convert(dtype: np.dtype, source: ArrayLike, normalize=True):
if not normalize:
return source.astype(dtype)
if np.issubdtype(dtype, np.integer):
dtype_range = np.iinfo(dtype).max - np.iinfo(dtype).min
def np_convert(target_dtype: np.dtype, source: ArrayLike, normalize=True, safe_bool=False):
""" TODO: Fix for Negative Values """
if safe_bool and target_dtype == bool:
return source.astype(target_dtype).astype(np.uint8)
elif np.issubdtype(target_dtype, np.integer) and normalize:
dtype_range = np.iinfo(target_dtype).max - np.iinfo(target_dtype).min
source_range = np.max(source) - np.min(source)

# Avoid divide by 0, esp. as numpy segfaults when you do.
if source_range == 0.0:
source_range = 1.0

return (source * max(int(dtype_range / source_range), 1)).astype(dtype)
elif np.issubdtype(dtype, np.floating):
return source.astype(dtype)
return (source * max(int(dtype_range / source_range), 1)).astype(target_dtype)
elif np.issubdtype(target_dtype, np.floating) and normalize:
source_range = np.max(source) - np.min(source)

# Avoid divide by 0, esp. as numpy segfaults when you do.
if source_range == 0.0:
source_range = 1.0

return (source / source_range).astype(target_dtype)
else:
return source.astype(target_dtype)


def generate_tiff_write(write_func: callable, compression: str | None, micron_resolution: np.ndarray[float],
backprojection_offset: np.ndarray, **kwargs):
backprojection_offset: np.ndarray, **kwargs):
# Volume cache resolution is in voxel size, but .tiff XY resolution is in voxels per unit, so we invert.
resolution = [1.0 / voxel_size for voxel_size in micron_resolution[:2] * 0.0001]
resolutionunit = "CENTIMETER"
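A quick worked example of the inversion described in the comment above (numbers are illustrative, not taken from this PR): a 0.7 µm voxel is 0.7 × 0.0001 = 7e-5 cm, so the TIFF XY resolution tag becomes roughly 14285.7 voxels per centimeter.

# Illustrative only: micron voxel size -> TIFF XY resolution in voxels per centimeter.
micron_resolution = [0.7, 0.7, 0.7]
resolution = [1.0 / (v * 0.0001) for v in micron_resolution[:2]]   # ~[14285.71, 14285.71]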
@@ -217,6 +224,6 @@ def write_conv_vol(writer: callable, source_path, shape, dtype, *args, **kwargs)
vol = volume_from_intermediates(source_path, shape)
perf["Merge Volume"] = time.perf_counter() - start
start = time.perf_counter()
writer(*args, data=np_convert(dtype, vol.reshape(shape.Y, shape.X), False), **kwargs)
writer(*args, data=np_convert(dtype, vol.reshape(shape.Y, shape.X), normalize=False, safe_bool=True), **kwargs)
perf["Write Merged"] = time.perf_counter() - start
return perf
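As a hedged usage sketch of the updated np_convert (assuming it is imported from ouroboros.helpers.files, as the tests below do), the three code paths behave roughly as follows:

import numpy as np
from ouroboros.helpers.files import np_convert

data = np.linspace(0, 1, 16)

# Normalized integer conversion: scaled by max(int(dtype_range / source_range), 1);
# matches np.arange(0, 65536, 4369) in test_np_convert_from_float below.
as_uint16 = np_convert(np.uint16, data)

# Direct conversion: plain astype, no rescaling -> fifteen zeros and a single 1.
raw = np_convert(np.uint16, data, normalize=False)

# safe_bool: threshold to bool, then widen to uint8 (presumably friendlier for TIFF writes).
mask = np_convert(bool, data, safe_bool=True)      # dtype uint8, values 0 and 1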
16 changes: 13 additions & 3 deletions python/ouroboros/helpers/slice.py
@@ -201,27 +201,37 @@ def backproject_box(bounding_box: BoundingBox, slice_rects: np.ndarray, slices:
# No slices, just return
return np.empty((0), dtype=np.uint32), np.empty(0, dtype=np.float32), np.empty(0, dtype=np.float32)

# Plot the backprojected X/Y/Z coordinates for each of the slice rects in this chunk.
grid_call = partial(coordinate_grid, shape=slices[0].shape, floor=bounding_box.get_min(), flip=True)
precise_points = np.concatenate(list(map(grid_call, slice_rects)))

# Flatten values to a 1-D array for efficient allocation.
# Use ZYX domain rather than XYZ as the former is what is written to disk.
values = slices.flatten()
zyx_shape = np.flip(bounding_box.get_shape())
flat_shape = np.prod(zyx_shape)

grid_call = partial(coordinate_grid, shape=slices[0].shape, floor=bounding_box.get_min(), flip=True)
precise_points = np.concatenate(list(map(grid_call, slice_rects)))
# Determine minimum dtype for index of flattened shape.
squish_type = np.min_scalar_type(flat_shape)

# Allocate the flattened data volume.
volume = np.zeros((2, flat_shape), dtype=np.float32)
squish_type = np.min_scalar_type(flat_shape)

# Get the top corner (integer) points and full weight matrix.
points, weights = _points_and_weights(precise_points.reshape(-1, 3).T, zyx_shape, squish_type)

# Sequentially accumulate the values and weights for each corner into the flattened array.
for corner in np.array(list(np.ndindex(2, 2, 2))):
w_values, c_weights = _apply_weights(values, weights, corner)
point_inc = np.ravel_multi_index(corner, zyx_shape).astype(squish_type)

np.add.at(volume[0], points + point_inc, w_values)
np.add.at(volume[1], points + point_inc, c_weights)

# Get indices of the flattened Z-Y-X backprojected domain that have values.
nz_vol = np.flatnonzero(volume[0])

# Return indices and only the volume region with values.
return nz_vol, volume[0, nz_vol].squeeze(), volume[1, nz_vol].squeeze()
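For readers skimming the corner loop above, here is a self-contained sketch of the same accumulation pattern. It is an illustration under assumed semantics, not the project's _points_and_weights/_apply_weights helpers: precise points are floored to integer corners, trilinear weights come from the fractional parts, and contributions are splatted into a flattened Z-Y-X volume with np.ravel_multi_index offsets and np.add.at.

import numpy as np

zyx_shape = (4, 5, 6)
volume = np.zeros((2, np.prod(zyx_shape)), dtype=np.float32)   # row 0: weighted values, row 1: weights

points_f = np.array([[1.25, 2.5, 3.75]])      # precise (z, y, x) sample locations
values = np.array([10.0], dtype=np.float32)   # value carried by each sample

base = np.floor(points_f).astype(np.intp)     # integer "top corner" of each sample
frac = points_f - base                        # fractional parts drive the weights
flat_base = np.ravel_multi_index(base.T, zyx_shape)

for corner in np.ndindex(2, 2, 2):
    corner = np.array(corner)
    # Trilinear weight: frac where the corner is offset by 1, (1 - frac) where it is not.
    w = np.prod(np.where(corner, frac, 1.0 - frac), axis=1)
    # Valid as long as base + corner stays in bounds, mirroring the point_inc trick above.
    offset = np.ravel_multi_index(corner, zyx_shape)
    np.add.at(volume[0], flat_base + offset, values * w)
    np.add.at(volume[1], flat_base + offset, w)

nz_vol = np.flatnonzero(volume[0])            # indices with contributions, as returned upstream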


12 changes: 6 additions & 6 deletions python/ouroboros/helpers/volume_cache.py
@@ -252,22 +252,22 @@ def update_writable_rects(processed: np.ndarray, slice_rects: np.ndarray, min_di

Parameters:
-----------
processed (np.ndarray): Marker of which chunks are processed,
by their (Z, Y, X) indicies.
processed (np.ndarray): Marker of which chunks are processed,
by their (Z, Y, X) indices.
slice_rects: All full-size slice rects from the straightened volume.
min_dim (int): Minimum (z) dimension of the full object.
writeable (np.ndarray): Tracker of writable Z-stacks (index 0 = z min_dim).
Values: 0 (not writeable), 1 (writable), 2 (dispatched to writer).
Values: 0 (not writeable), 1 (writable), 2 (dispatched to writer).
chunk_size (int): Size of 3D chunk in (z) dimension.

Return:
-------
np.ndarray: Sorted values in the given dimension that are ready to be written.

"""
# Each chunk covers part of chunk_size slice_rects in z (straightened) dimension,
# except last may be shorter (so capped to length of slice_rects).
# A full slice_rect is ready if all (y, x) chunks for it are processed.
# Each chunk covers a (y, x) portion of chunk_size slice_rects in the z (straightened) dimension,
# except the last, which may be shorter (so capped to the length of slice_rects).
# A full slice_rect is ready once all of its (y, x) chunks are processed.
processed_slices = np.repeat(np.all(processed, axis=(1, 2)), chunk_size)[:len(slice_rects)]
if np.all(processed_slices):
# All slice_rects processed, remaining z (backprojected) slices are to be written.
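A small worked example of the readiness test used in the truncated hunk above, with hypothetical numbers (the real processed array is maintained elsewhere in the pipeline):

import numpy as np

chunk_size = 4
processed = np.array([
    [[1, 1], [1, 1]],   # z-chunk 0: every (y, x) chunk done
    [[1, 0], [1, 1]],   # z-chunk 1: one (y, x) chunk still pending
    [[1, 1], [1, 1]],   # z-chunk 2: done
], dtype=bool)
num_slice_rects = 10     # last z-chunk covers fewer slice_rects, so the repeat is capped

processed_slices = np.repeat(np.all(processed, axis=(1, 2)), chunk_size)[:num_slice_rects]
# -> [True]*4 + [False]*4 + [True]*2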
30 changes: 22 additions & 8 deletions python/ouroboros/pipeline/backproject_pipeline.py
@@ -127,18 +127,25 @@ def _process(self, input_data: any) -> tuple[any, None] | tuple[None, any]:
print(f"\nFront Projection Shape: {FPShape}")
print(f"\nBack Projection Shape (Z/Y/X):{write_shape}")

pipeline_input.output_file_path = f"{config.output_file_name}_{'_'.join(map(str, full_bounding_box.get_min(np.uint32)))}"
pipeline_input.output_file_path = (f"{config.output_file_name}_"
f"{'_'.join(map(str, full_bounding_box.get_min(np.uint32)))}")
folder_path = Path(config.output_file_folder, pipeline_input.output_file_path)
folder_path.mkdir(exist_ok=True, parents=True)

i_path = Path(config.output_file_folder,
f"{config.output_file_name}_t_{'_'.join(map(str, full_bounding_box.get_min(np.uint32)))}")

if config.make_single_file:
is_big_tiff = calculate_gigabytes_from_dimensions(np.prod(write_shape), np.uint16) > 4 # Check Dtype
is_big_tiff = calculate_gigabytes_from_dimensions(
np.prod(write_shape),
np.uint8 if config.make_backprojection_binary else np.uint16) > 4
else:
is_big_tiff = calculate_gigabytes_from_dimensions(np.prod(write_shape[1:]), np.uint16) > 4 # Check Dtype
is_big_tiff = calculate_gigabytes_from_dimensions(
np.prod(write_shape[1:]),
np.uint8 if config.make_backprojection_binary else np.uint16) > 4

# Generate image writing function
# Combining compression with binary images can cause issues.
bp_offset = pipeline_input.backprojection_offset if config.backproject_min_bounding_box else None
tif_write = partial(generate_tiff_write,
compression=config.backprojection_compression,
@@ -205,7 +212,9 @@ def note_written(write_future):
write_futures.append(write_executor.submit(
write_conv_vol,
tif_write(tifffile.imwrite), i_path.joinpath(f"i_{index:05}"),
ImgSlice(*write_shape[1:]), np.uint16, folder_path.joinpath(f"{index:05}.tif")
ImgSlice(*write_shape[1:]),
bool if config.make_backprojection_binary else np.uint16,
folder_path.joinpath(f"{index:05}.tif")
))
write_futures[-1].add_done_callback(note_written)

@@ -265,8 +274,8 @@ def note_written(write_future):
pipeline_input.backprojected_folder_path = folder_path

self.add_timing("export", time.perf_counter() - start)
if config.make_single_file:

if config.make_single_file:
shutil.rmtree(folder_path)

return None
@@ -285,8 +294,13 @@ def process_chunk(
start_total = time.perf_counter()

# Load the straightened volume
straightened_volume = tifffile.memmap(straightened_volume_path, mode="r")
durations["memmap"] = [time.perf_counter() - start_total]
try:
straightened_volume = tifffile.memmap(straightened_volume_path, mode="r")
durations["memmap"] = [time.perf_counter() - start_total]
except BaseException as be:
print(f"Error loading Volume: {be} : {straightened_volume_path}")
traceback.print_tb(be.__traceback__, file=sys.stderr)
raise be

# Get the slices from the straightened volume. (Dumb, but maybe a bugfix?)
start = time.perf_counter()
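The dtype-aware BigTIFF check above reduces to an uncompressed byte-count estimate. A hedged standalone equivalent, not the project's calculate_gigabytes_from_dimensions (whose definition is outside this diff), might look like:

import numpy as np

def estimate_gigabytes(num_voxels: int, dtype) -> float:
    # Uncompressed size of the stack, assuming 1 GB = 10**9 bytes.
    return num_voxels * np.dtype(dtype).itemsize / 1e9

write_shape = (1200, 2048, 2048)   # hypothetical Z/Y/X backprojection shape
make_backprojection_binary = True
out_dtype = np.uint8 if make_backprojection_binary else np.uint16

# Classic TIFF offsets are 32-bit, so anything over ~4 GB needs BigTIFF.
is_big_tiff = estimate_gigabytes(int(np.prod(write_shape)), out_dtype) > 4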
53 changes: 43 additions & 10 deletions python/test/helpers/test_files.py
@@ -2,7 +2,6 @@
from pathlib import Path

import numpy as np
from tifffile import imwrite, TiffFile

from ouroboros.helpers.files import (
format_backproject_output_file,
@@ -169,14 +168,6 @@ def test_num_digits_for_n_files():
assert result == 2


def test_np_convert():
float_data = np.linspace(0, 1, 16)
int_data = np_convert(np.uint16, float_data)

assert np.all(int_data == np.arange(0, np.iinfo(np.uint16).max + 1, np.iinfo(np.uint16).max // 15))
assert np.all(np_convert(np.float32, int_data) == int_data.astype(np.float32))


def test_generate_tiff_write(tmp_path):
micron_resolution = np.array([0.7, 0.7, 0.7])
backprojection_offset = (55, 44, 77)
Expand Down Expand Up @@ -284,10 +275,52 @@ def test_increment_volume(tmp_path):
assert np.allclose(volume[1, mapped_source[0]], np.sum(source_weights[[0, 2]]))
assert np.all(np.nonzero(volume)[0] == np.array([0, 0, 1, 1]))
assert np.all(np.nonzero(volume)[1] == np.array([3947, 3952, 3947, 3952]))

assert not sample_path.exists()


def test_np_convert_from_int():
base = np.random.randint(0, 10, 6400).reshape(80, 80)

# Direct Conversion
assert np.all(np_convert(np.float32, base, normalize=False) == base.astype(np.float32))

# Normalized Conversion
assert np.all(np_convert(np.float32, base) == base.astype(np.float32) / (np.max(base) - np.min(base)))

# Safe Bool
safe_bool = np_convert(bool, base, safe_bool=True)
assert safe_bool.dtype == np.uint8
assert np.all(safe_bool == (base > 0))

# Unsafe Bool - Raw bool datatype
safe_bool = np_convert(bool, base, safe_bool=False)
assert safe_bool.dtype == bool
assert np.all(safe_bool == (base > 0))


def test_np_convert_from_float():
base = np.random.randint(0, 16, 6400).reshape(80, 80) * np.random.rand(80, 80)
float_data = np.linspace(0, 1, 16)

# Direct Conversion
assert np.all(np_convert(np.uint16, float_data, normalize=False) == [0] * 15 + [1])

# Normalized Conversion
assert np.all(np_convert(np.uint16, float_data) ==
np.arange(0, np.iinfo(np.uint16).max + 1, np.iinfo(np.uint16).max // 15))

# Safe Bool
safe_bool = np_convert(bool, base, safe_bool=True)
assert safe_bool.dtype == np.uint8
assert np.all(safe_bool == (base > 0))

# Unsafe Bool - Raw bool datatype
safe_bool = np_convert(bool, base, safe_bool=False)
assert safe_bool.dtype == bool
assert np.all(safe_bool == (base > 0))


def test_volume_from_intermediates():
pass
