diff --git a/.coverage b/.coverage
index 172cde3..c6fe2e9 100644
Binary files a/.coverage and b/.coverage differ
diff --git a/python/ouroboros/helpers/files.py b/python/ouroboros/helpers/files.py
index b2e33bf..a8cb17e 100644
--- a/python/ouroboros/helpers/files.py
+++ b/python/ouroboros/helpers/files.py
@@ -194,15 +194,16 @@ def load_z_intermediate(path: Path, offset: int = 0):
 def increment_volume(path: Path, vol: np.ndarray, offset: int = 0, cleanup=False):
     indicies, values, weights = load_z_intermediate(path, offset)
-    np.add.at(vol[0], indicies, values)
-    np.add.at(vol[1], indicies, weights)
+    for i in range(0, vol.shape[0] - 1):
+        np.add.at(vol[i], indicies, np.atleast_2d(values)[i])
+    np.add.at(vol[-1], indicies, weights)
 
     if cleanup:
         path.unlink()
 
 
 def volume_from_intermediates(path: Path, shape: DataShape, thread_count: int = 4):
-    vol = np.zeros((2, np.prod((shape.Y, shape.X))), dtype=np.float32)
+    vol = np.zeros((1 + shape.C, np.prod((shape.Y, shape.X))), dtype=np.float32)
 
     with ThreadPool(thread_count) as pool:
         if not path.exists():
             # We don't have any intermediate(s) for this value, so return empty.
@@ -214,9 +215,9 @@ def volume_from_intermediates(path: Path, shape: DataShape, thread_count: int =
             offset_set = range(0, len(tif.series), 4)
         pool.starmap(increment_volume, [(path, vol, i, False) for i in offset_set])
 
-    nz = np.flatnonzero(vol[0])
-    vol[0, nz] /= vol[1, nz]
-    return vol[0]
+    nz = np.flatnonzero(vol[-1])
+    vol[:-1, nz] /= vol[-1, nz]
+    return vol[:-1]
 
 
 def write_conv_vol(writer: callable, source_path, shape, dtype, scaling, target_folder, index, interpolation):
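
Note on the new accumulator layout in `increment_volume` / `volume_from_intermediates`: the working buffer grows from two rows (values, weights) to `1 + C` rows, where rows `0..C-1` hold per-channel weighted sums and the last row holds the shared interpolation weights; normalization then divides every channel row by that one weight row. A minimal sketch of the pattern, with hypothetical shapes and data (not the repository's helpers):

```python
# Hypothetical standalone sketch of the (1 + C)-row accumulator above.
import numpy as np

C, n = 3, 16                            # channels, flattened Y*X size
vol = np.zeros((1 + C, n), dtype=np.float32)

# One intermediate contributes per-channel values (C, k) at flat indices
# idx (k,) with interpolation weights (k,); duplicate indices accumulate.
idx = np.array([2, 5, 5, 9])
values = np.arange(C * 4, dtype=np.float32).reshape(C, 4)
weights = np.array([0.25, 0.5, 0.25, 1.0], dtype=np.float32)

for i in range(C):                      # rows 0..C-1: weighted value sums
    np.add.at(vol[i], idx, values[i])
np.add.at(vol[-1], idx, weights)        # last row: weight sums

nz = np.flatnonzero(vol[-1])            # normalize only touched voxels
vol[:-1, nz] /= vol[-1, nz]             # all channels share one weight row
```

Keeping the weights in the final row (rather than row 1) is what lets `vol[:-1]` and `vol[-1]` address the channel block and the weight row for any `C`, including the single-channel case.
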
@@ -228,7 +229,7 @@ def write_conv_vol(writer: callable, source_path, shape, dtype, scaling, target_
         start = time.perf_counter()
         # CV2 is only 2D but we're resizing from the 1D image anyway at the moment.
         new_volume = cv2.resize(
-            np_convert(dtype, vol.reshape(shape.Y, shape.X), normalize=False, safe_bool=True),
+            np_convert(dtype, vol.T.reshape(shape.Y, shape.X, shape.C), normalize=False, safe_bool=True),
             None, fx=scaling[1], fy=scaling[2], interpolation=interpolation)
         perf["Zoom"] = time.perf_counter() - start
         start = time.perf_counter()
@@ -237,7 +238,7 @@ def write_conv_vol(writer: callable, source_path, shape, dtype, scaling, target_
         perf["Write Merged"] = time.perf_counter() - start
     else:
         start = time.perf_counter()
-        writer(target_folder.joinpath(f"{index}"),
-               data=np_convert(dtype, vol.reshape(shape.Y, shape.X), normalize=False, safe_bool=True))
+        writer(target_folder.joinpath(f"{index}.tif"),
+               data=np_convert(dtype, vol.T.reshape(shape.Y, shape.X, shape.C), normalize=False, safe_bool=True))
         perf["Write Merged"] = time.perf_counter() - start
     return perf
diff --git a/python/ouroboros/helpers/shapes.py b/python/ouroboros/helpers/shapes.py
index 1bfd2f5..eacd73b 100644
--- a/python/ouroboros/helpers/shapes.py
+++ b/python/ouroboros/helpers/shapes.py
@@ -208,6 +208,8 @@ def of(cls, source: DataShape):
 @dataclass
 class ImgSlice(DataShape): Y: int; X: int  # noqa: E701,E702
 @dataclass
+class ImgSliceC(DataShape): Y: int; X: int; C: int  # noqa: E701,E702
+@dataclass
 class Proj(DataShape): Y: int; X: int  # noqa: E701,E702
 @dataclass
 class YSlice(DataShape): Theta: int; X: int  # noqa: E701,E702
diff --git a/python/ouroboros/helpers/slice.py b/python/ouroboros/helpers/slice.py
index 577ba2f..1f9845e 100644
--- a/python/ouroboros/helpers/slice.py
+++ b/python/ouroboros/helpers/slice.py
@@ -168,7 +168,7 @@ def slice_volume_from_grids(
 
 def _apply_weights(values, weights, corner):
     # This looks stupid but is twice as fast as fancy indexing with prod
     c_weights = weights[corner[0], 0, :] * weights[corner[1], 1, :] * weights[corner[2], 2, :]
-    w_values = values * c_weights
+    w_values = values * c_weights[..., np.newaxis]
 
     return w_values, c_weights
@@ -188,13 +188,15 @@ def backproject_box(bounding_box: BoundingBox, slice_rects: np.ndarray, slices:
         # No slices, just return
         return np.empty((0), dtype=np.uint32), np.empty(0, dtype=np.float32), np.empty(0, dtype=np.float32)
 
+    channels = 1 if len(slices.shape) < 4 else slices.shape[-1]
+
     # Plot the backprojected X/Y/Z coordinates for each of the slice rects in this chunk.
-    grid_call = partial(coordinate_grid, shape=slices[0].shape, floor=bounding_box.get_min(), flip=True)
+    grid_call = partial(coordinate_grid, shape=slices[0].shape[:2], floor=bounding_box.get_min(), flip=True)
     precise_points = np.concatenate(list(map(grid_call, slice_rects)))
 
     # Flatten values to 1-Dimension Array for Efficient Allocation.
     # Use ZYX domain rather than XYZ as the former is what is written to disk.
-    values = slices.flatten()
+    values = slices.flatten().reshape(-1, channels)
 
     zyx_shape = np.flip(bounding_box.get_shape())
     flat_shape = np.prod(zyx_shape)
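
The `[..., np.newaxis]` in `_apply_weights` is the broadcasting counterpart of `values` gaining a trailing channel axis in `backproject_box`: per-point weights of shape `(N,)` must be lifted to `(N, 1)` before they can multiply `(N, C)` values. A small illustration with hypothetical sizes:

```python
# Hypothetical sizes; shows why the weights need an explicit channel axis.
import numpy as np

n_points, channels = 5, 3
values = np.ones((n_points, channels), dtype=np.float32)        # (N, C)
c_weights = np.linspace(0.1, 0.5, n_points).astype(np.float32)  # (N,)

# (N, 1) broadcasts across C; a bare `values * c_weights` would raise a
# shape error here (and would silently misalign if N happened to equal C).
w_values = values * c_weights[..., np.newaxis]
assert w_values.shape == (n_points, channels)
```
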
@@ -202,7 +204,7 @@ def backproject_box(bounding_box: BoundingBox, slice_rects: np.ndarray, slices:
     squish_type = np.min_scalar_type(flat_shape)
 
     # Allocate the flattened data volume.
-    volume = np.zeros((2, flat_shape), dtype=np.float32)
+    volume = np.zeros((1 + channels, flat_shape), dtype=np.float32)
 
     # Get the top corner (integer) points and full weight matrix.
     points, weights = _points_and_weights(precise_points.reshape(-1, 3).T, zyx_shape, squish_type)
@@ -212,14 +214,15 @@
         w_values, c_weights = _apply_weights(values, weights, corner)
         point_inc = np.ravel_multi_index(corner, zyx_shape).astype(squish_type)
 
-        np.add.at(volume[0], points + point_inc, w_values)
-        np.add.at(volume[1], points + point_inc, c_weights)
+        for i in range(0, channels):
+            np.add.at(volume[i], points + point_inc, w_values[..., i])
+        np.add.at(volume[-1], points + point_inc, c_weights)
 
     # Get indicies of the flattened Z-Y-X backprojected domain that have values.
-    nz_vol = np.flatnonzero(volume[0])
+    nz_vol = np.flatnonzero(volume[-1])
 
     # Return indicies and only the volume region with values.
-    return nz_vol, volume[0, nz_vol].squeeze(), volume[1, nz_vol].squeeze()
+    return nz_vol, volume[:-1, nz_vol].squeeze(), volume[-1, nz_vol].squeeze()
 
 
 def make_volume_binary(volume: np.ndarray, dtype=np.uint8) -> np.ndarray:
diff --git a/python/ouroboros/pipeline/backproject_pipeline.py b/python/ouroboros/pipeline/backproject_pipeline.py
index b011154..2c7283b 100644
--- a/python/ouroboros/pipeline/backproject_pipeline.py
+++ b/python/ouroboros/pipeline/backproject_pipeline.py
@@ -34,7 +34,7 @@
     write_conv_vol,
     write_small_intermediate
 )
-from ouroboros.helpers.shapes import DataRange, ImgSlice
+from ouroboros.helpers.shapes import DataRange, ImgSliceC
 
 DEFAULT_CHUNK_SIZE = 160
 
@@ -84,10 +84,12 @@ def _process(self, input_data: any) -> tuple[any, None] | tuple[None, any]:
                # tiff format check to add
                FPShape = FrontProjStack(D=len(list(Path(straightened_volume_path).iterdir())),
                                         V=tif.pages[0].shape[0], U=tif.pages[0].shape[1])
+               channels = 1 if len(tif.pages[0].shape) < 3 else tif.pages[0].shape[-1]
        else:
            with tifffile.TiffFile(straightened_volume_path) as tif:
                is_compressed = bool(tif.pages[0].compression)
                FPShape = FrontProjStack(D=len(tif.pages), V=tif.pages[0].shape[0], U=tif.pages[0].shape[1])
+               channels = 1 if len(tif.pages[0].shape) < 3 else tif.pages[0].shape[-1]
 
        cannot_memmap = False
        try:
@@ -154,7 +156,8 @@ def _process(self, input_data: any) -> tuple[any, None] | tuple[None, any]:
            tif_write = partial(generate_tiff_write,
                                compression=config.backprojection_compression,
                                micron_resolution=volume_cache.get_resolution_um(),
-                               backprojection_offset=bp_offset)
+                               backprojection_offset=bp_offset,
+                               photometric="rgb" if channels > 1 else "minisblack")
 
        if pipeline_input.slice_options.output_mip_level != config.output_mip_level:
            scaling_factors, _ = calculate_scaling_factors(
...
            )
        else:
            scaling_factors = None
-       print(f"SF: {scaling_factors}")
 
        # Allocate procs equally between BP math and writing if we're rescaling, otherwise 3-1 favoring
        # the BP calculation.
@@ -233,7 +235,7 @@ def note_written(write_future):
                        write_conv_vol,
                        tif_write(tifffile.imwrite),
                        i_path.joinpath(f"i_{index:05}"),
-                       ImgSlice(*write_shape[1:]),
+                       ImgSliceC(*write_shape[1:], channels),
                        bool if config.make_backprojection_binary else np.uint16,
                        scaling_factors,
                        folder_path,
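
The pipeline changes thread the channel count from the input TIFF down to the writer: a page shape of `(Y, X)` means one channel, `(Y, X, C)` means `C`, and multi-sample output is tagged `photometric="rgb"` so viewers interpret it correctly. A sketch of that probe-and-write round trip, using hypothetical file names in place of `straightened_volume_path` and the real output path:

```python
# Hypothetical round trip: probe channels from the page shape, tag the output.
import numpy as np
import tifffile

# Stand-in for the straightened volume: 4 RGB pages of 64x64.
tifffile.imwrite("straightened.tif", np.zeros((4, 64, 64, 3), dtype=np.uint8))

with tifffile.TiffFile("straightened.tif") as tif:
    page_shape = tif.pages[0].shape                 # (Y, X) or (Y, X, C)
    channels = 1 if len(page_shape) < 3 else page_shape[-1]

# Grayscale stays "minisblack"; multi-sample pages are tagged "rgb".
tifffile.imwrite("out.tif",
                 np.zeros((64, 64, channels), dtype=np.uint16).squeeze(),
                 photometric="rgb" if channels > 1 else "minisblack")
```

The same probed `channels` value also sizes the accumulator (`ImgSliceC(*write_shape[1:], channels)`), so the write path and the math path cannot disagree about channel count.
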
durations["total_process"] = [time.perf_counter() - start_total] return durations, index, [] @@ -354,7 +356,7 @@ def write_z(i, z_slice): file_path.joinpath(f"i_{offset_z:05}").mkdir(exist_ok=True, parents=True) write_small_intermediate(file_path.joinpath(f"i_{offset_z:05}", f"{index}.tif"), np.fromiter(offset_dict.values(), dtype=np.uint32, count=4), - yx_vals[z_slice], values[z_slice], weights[z_slice]) + yx_vals[z_slice], np.atleast_2d(values)[:, z_slice], weights[z_slice]) with ThreadPool(12) as pool: pool.starmap(write_z, enumerate(z_slices))