
Commit 2875c74

ds.add_layer_from_images: added topleft and dtype kwargs (#818)
* ds.add_layer_from_images: added topleft and enforce_dtype kwargs
* add changelog entries, update docstring
* cleanup dtype typing, make compatible with old numpy versions
* format
* specify ImportError explicitly
* rename enforce_dtype to dtype
1 parent 9154e68 commit 2875c74
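
A minimal usage sketch of the two new keyword-only arguments. The argument names follow the diffs below; the dataset constructor call, paths, voxel size, and layer name are illustrative, not taken from this commit.

```python
import webknossos as wk

# Illustrative setup: create a dataset, then import an image stack with the
# new keyword-only arguments added in this commit.
dataset = wk.Dataset("my_dataset", voxel_size=(11, 11, 24))
layer = dataset.add_layer_from_images(
    "testdata/tiff",                 # directory containing the source images
    layer_name="from_images",
    category="segmentation",
    compress=True,
    topleft=(100, 100, 55),          # new: offset in Mag(1), shifts the layer's bounding box
    dtype="uint32",                  # new: read image data is cast via numpy.ndarray.astype
)
assert layer.bounding_box.topleft == wk.Vec3Int(100, 100, 55)
```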

File tree

5 files changed: +53 -30 lines changed


webknossos/Changelog.md

Lines changed: 2 additions & 4 deletions
@@ -13,8 +13,10 @@ For upgrade instructions, please check the respective *Breaking Changes* section
 [Commits](https://github.com/scalableminds/webknossos-libs/compare/v0.10.22...HEAD)
 
 ### Breaking Changes
+- `ds.add_layer_from_images`: Turned some arguments into keyword-only arguments, only affecting positional arguments after the first 8 arguments. [#818](https://github.com/scalableminds/webknossos-libs/pull/818)
 
 ### Added
+- `ds.add_layer_from_images`: added topleft and dtype kw-only arguments. [#818](https://github.com/scalableminds/webknossos-libs/pull/818)
 
 ### Changed
 
@@ -25,11 +27,9 @@ For upgrade instructions, please check the respective *Breaking Changes* section
 [Commits](https://github.com/scalableminds/webknossos-libs/compare/v0.10.21...v0.10.22)
 
 ### Fixed
-
 - Fixed a bug where some image sequences could not be read in layer_from_images. [#817](https://github.com/scalableminds/webknossos-libs/pull/817)
 
 
-
 ## [0.10.21](https://github.com/scalableminds/webknossos-libs/releases/tag/v0.10.21) - 2022-10-26
 [Commits](https://github.com/scalableminds/webknossos-libs/compare/v0.10.20...v0.10.21)
 
@@ -44,8 +44,6 @@ For upgrade instructions, please check the respective *Breaking Changes* section
 - `annotation.temporary_volume_layer_copy()` works also with empty volume annotations. [#814](https://github.com/scalableminds/webknossos-libs/pull/814)
 
 
-
-
 ## [0.10.19](https://github.com/scalableminds/webknossos-libs/releases/tag/v0.10.19) - 2022-10-18
 [Commits](https://github.com/scalableminds/webknossos-libs/compare/v0.10.18...v0.10.19)
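
To illustrate the breaking change recorded above: only the first eight parameters of `add_layer_from_images` (up to and including `compress`) can still be passed positionally; everything from `topleft` onward must now be given by keyword. A hedged sketch with illustrative dataset path, voxel size, and values:

```python
import webknossos as wk

dataset = wk.Dataset("my_dataset", voxel_size=(11, 11, 24))  # illustrative setup

# Fine after this commit: the later options are passed by keyword.
layer = dataset.add_layer_from_images(
    "testdata/tiff",
    layer_name="keyword_only_demo",
    swap_xy=True,
    flip_z=True,
)

# Passing swap_xy (or any later argument) positionally in its old slot
# now raises a TypeError.
```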

webknossos/tests/dataset/test_add_layer_from_images.py

Lines changed: 4 additions & 2 deletions
@@ -22,7 +22,9 @@ def test_compare_tifffile(tmp_path: Path) -> None:
         layer_name="compare_tifffile",
         compress=True,
         category="segmentation",
+        topleft=(100, 100, 55),
     )
+    assert l.bounding_box.topleft == wk.Vec3Int(100, 100, 55)
     data = l.get_finest_mag().read()[0, :, :]
     for z_index in range(0, data.shape[-1]):
         with TiffFile("testdata/tiff/test.0000.tiff") as tif_file:
@@ -60,8 +62,8 @@ def test_compare_tifffile(tmp_path: Path) -> None:
         ),
         (
             "testdata/rgb_tiff/test_rgb.tif",
-            {"mag": 2, "channel": 1},
-            "uint8",
+            {"mag": 2, "channel": 1, "dtype": "uint32"},
+            "uint32",
             1,
             (64, 64, 2),
         ),
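
The updated parametrization expects the RGB TIFF to read back as `uint32` once `dtype="uint32"` is passed. A hedged sketch of that expectation; the dataset creation and layer name are illustrative, while the keyword arguments mirror the test parameters:

```python
import numpy as np
import webknossos as wk

ds = wk.Dataset("rgb_dataset", voxel_size=(1, 1, 1))  # illustrative dataset
layer = ds.add_layer_from_images(
    "testdata/rgb_tiff/test_rgb.tif",
    layer_name="rgb",
    mag=2,
    channel=1,
    dtype="uint32",
)
assert layer.get_finest_mag().read().dtype == np.dtype("uint32")
```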

webknossos/webknossos/dataset/_utils/pims_images.py

Lines changed: 14 additions & 2 deletions
@@ -20,6 +20,7 @@
 
 import numpy as np
 
+from webknossos.dataset.layer import DTypeLike
 from webknossos.dataset.mag_view import MagView
 from webknossos.geometry.vec3_int import Vec3Int
 
@@ -32,6 +33,10 @@
 
 
 class PimsImages:
+    dtype: DTypeLike
+    expected_shape: Vec3Int
+    num_channels: int
+
     def __init__(
         self,
         images: Union[str, Path, "pims.FramesSequence", List[Union[str, PathLike]]],
@@ -294,7 +299,11 @@ def _open_images(
             yield images
 
     def copy_to_view(
-        self, args: Tuple[int, int], mag_view: MagView, is_segmentation: bool
+        self,
+        args: Tuple[int, int],
+        mag_view: MagView,
+        is_segmentation: bool,
+        dtype: Optional[DTypeLike] = None,
     ) -> Tuple[Tuple[int, int], Optional[int]]:
         """Copies the images according to the passed arguments to the given mag_view.
         args is expected to be the start and end of the z-range, meant for usage with an executor."""
@@ -310,7 +319,7 @@ def copy_to_view(
         if self._flip_z:
             images = images[::-1]  # pylint: disable=unsubscriptable-object
         with mag_view.get_buffered_slice_writer(
-            absolute_offset=(0, 0, z_start * mag_view.mag.z),
+            relative_offset=(0, 0, z_start * mag_view.mag.z),
             buffer_size=mag_view.info.chunk_shape.z,
         ) as writer:
             for image_slice in images[z_start:z_end]:
@@ -339,6 +348,9 @@ def copy_to_view(
                 if self._flip_y:
                     image_slice = np.flip(image_slice, -1)
 
+                if dtype is not None:
+                    image_slice = image_slice.astype(dtype, order="F")
+
                 if max_id is not None:
                     max_id = max(max_id, image_slice.max())
                 shapes.append(image_slice.shape[-2:])
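
The per-slice conversion added to `copy_to_view` is a plain `numpy.ndarray.astype` call in Fortran order; the slice writer also switches from `absolute_offset` to `relative_offset`, presumably so that the z-start is interpreted relative to the layer's (now possibly shifted) bounding box. A minimal sketch of the cast itself, with made-up data:

```python
import numpy as np

# One 2D image slice as it might come out of pims (values are made up).
image_slice = np.arange(12, dtype=np.uint8).reshape(3, 4)

# The conversion added in copy_to_view: cast to the requested dtype while
# keeping Fortran (column-major) memory order.
converted = image_slice.astype("uint32", order="F")

assert converted.dtype == np.dtype("uint32")
assert converted.flags["F_CONTIGUOUS"]
```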

webknossos/webknossos/dataset/dataset.py

Lines changed: 22 additions & 12 deletions
@@ -58,6 +58,7 @@
 from ._utils.from_images import guess_if_segmentation_path
 from ._utils.infer_bounding_box_existing_files import infer_bounding_box_existing_files
 from .layer import (
+    DTypeLike,
     Layer,
     SegmentationLayer,
     _dtype_per_channel_to_element_class,
@@ -738,8 +739,8 @@ def add_layer(
         self,
         layer_name: str,
         category: LayerCategoryType,
-        dtype_per_layer: Optional[Union[str, np.dtype, type]] = None,
-        dtype_per_channel: Optional[Union[str, np.dtype, type]] = None,
+        dtype_per_layer: Optional[DTypeLike] = None,
+        dtype_per_channel: Optional[DTypeLike] = None,
         num_channels: Optional[int] = None,
         data_format: Union[str, DataFormat] = DEFAULT_DATA_FORMAT,
         **kwargs: Any,
@@ -846,8 +847,8 @@ def get_or_add_layer(
         self,
         layer_name: str,
         category: LayerCategoryType,
-        dtype_per_layer: Optional[Union[str, np.dtype, type]] = None,
-        dtype_per_channel: Optional[Union[str, np.dtype, type]] = None,
+        dtype_per_layer: Optional[DTypeLike] = None,
+        dtype_per_channel: Optional[DTypeLike] = None,
         num_channels: Optional[int] = None,
         data_format: Union[str, DataFormat] = DEFAULT_DATA_FORMAT,
         **kwargs: Any,
@@ -975,17 +976,19 @@ def add_layer_from_images(
         chunk_shape: Optional[Union[Vec3IntLike, int]] = None,
         chunks_per_shard: Optional[Union[int, Vec3IntLike]] = None,
         compress: bool = False,
+        *,
         ## other arguments
+        topleft: Vec3IntLike = Vec3Int.zeros(),  # in Mag(1)
         swap_xy: bool = False,
         flip_x: bool = False,
         flip_y: bool = False,
         flip_z: bool = False,
+        dtype: Optional[DTypeLike] = None,
         use_bioformats: bool = False,
         channel: Optional[int] = None,
         timepoint: Optional[int] = None,
         batch_size: Optional[int] = None,  # defaults to shard-size z
         executor: Optional[Executor] = None,
-        *,
         chunk_size: Optional[Union[Vec3IntLike, int]] = None,  # deprecated
     ) -> Layer:
         """
@@ -1005,8 +1008,10 @@ def add_layer_from_images(
         * `data_format`: by default wkw files are written, may be set to "zarr"
         * `mag`: magnification to use for the written data
         * `chunk_shape`, `chunks_per_shard`, `compress`: adjust how the data is stored on disk
+        * `topleft`: set an offset in Mag(1) to start writing the data, only affecting the output
         * `swap_xy`: set to `True` to interchange x and y axis before writing to disk
-        * `flip_x`, `flip_y`, `flip_z`: set to `True` to flip the respective axis before writing to disk
+        * `flip_x`, `flip_y`, `flip_z`: set to `True` to reverse the respective axis before writing to disk
+        * `dtype`: the read image data will be converted to this dtype using `numpy.ndarray.astype`
         * `use_bioformats`: set to `True` to use the [pims bioformats adapter](https://soft-matter.github.io/pims/v0.6.1/bioformats.html), needs a JVM
         * `channel`: may be used to select a single channel, if multiple are available
         * `timepoint`: for timeseries, select a timepoint to use by specifying it as an int, starting from 0
@@ -1041,7 +1046,7 @@
             layer_name=layer_name,
             category=category,
             data_format=data_format,
-            dtype_per_channel=pims_images.dtype,
+            dtype_per_channel=pims_images.dtype if dtype is None else dtype,
             num_channels=pims_images.num_channels,
             **add_layer_kwargs,  # type: ignore[arg-type]
         )
@@ -1052,9 +1057,11 @@
             compress=compress,
         )
         mag = mag_view.mag
-        layer.bounding_box = BoundingBox(
-            (0, 0, 0), pims_images.expected_shape
-        ).from_mag_to_mag1(mag)
+        layer.bounding_box = (
+            BoundingBox((0, 0, 0), pims_images.expected_shape)
+            .from_mag_to_mag1(mag)
+            .offset(topleft)
+        )
 
         if batch_size is None:
             if compress:
@@ -1074,6 +1081,7 @@
             pims_images.copy_to_view,
             mag_view=mag_view,
             is_segmentation=category == "segmentation",
+            dtype=dtype,
         )
 
         args = []
@@ -1109,8 +1117,10 @@
             max_id = max(max_ids)
             cast(SegmentationLayer, layer).largest_segment_id = max_id
         actual_size = Vec3Int(dimwise_max(shapes) + (pims_images.expected_shape.z,))
-        layer.bounding_box = BoundingBox((0, 0, 0), actual_size).from_mag_to_mag1(
-            mag
+        layer.bounding_box = (
+            BoundingBox((0, 0, 0), actual_size)
+            .from_mag_to_mag1(mag)
+            .offset(topleft)
         )
         if pims_images.expected_shape != actual_size:
             warnings.warn(
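
Both bounding-box assignments now append `.offset(topleft)` to the call chain, so the layer's box is first scaled from the written mag up to Mag(1) and then shifted by the new argument. A small sketch of that arithmetic with illustrative values:

```python
from webknossos import BoundingBox, Mag, Vec3Int

expected_shape = Vec3Int(64, 64, 2)  # size of the imported data, in Mag(2) voxels
topleft = Vec3Int(100, 100, 55)      # new keyword-only argument, given in Mag(1)

bbox = (
    BoundingBox((0, 0, 0), expected_shape)
    .from_mag_to_mag1(Mag(2))  # scale the box up to Mag(1) coordinates
    .offset(topleft)           # shift it by the requested offset
)

assert bbox.topleft == topleft
assert bbox.size == Vec3Int(128, 128, 4)
```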

webknossos/webknossos/dataset/layer.py

Lines changed: 11 additions & 10 deletions
@@ -54,6 +54,11 @@
 )
 from .mag_view import MagView, _find_mag_path_on_disk
 
+try:
+    from numpy.typing import DTypeLike
+except ImportError:
+    DTypeLike = Union[str, np.dtype, type]  # type: ignore[misc]
+
 
 def _is_int(s: str) -> bool:
     try:
@@ -64,7 +69,7 @@ def _is_int(s: str) -> bool:
 
 
 def _convert_dtypes(
-    dtype: Union[str, np.dtype],
+    dtype: DTypeLike,
     num_channels: int,
     dtype_per_layer_to_dtype_per_channel: bool,
 ) -> str:
@@ -81,9 +86,7 @@
     return "".join(converted_dtype_parts)
 
 
-def _normalize_dtype_per_channel(
-    dtype_per_channel: Union[str, np.dtype, type]
-) -> np.dtype:
+def _normalize_dtype_per_channel(dtype_per_channel: DTypeLike) -> np.dtype:
     try:
         return np.dtype(dtype_per_channel)
     except TypeError as e:
@@ -92,9 +95,7 @@
         ) from e
 
 
-def _normalize_dtype_per_layer(
-    dtype_per_layer: Union[str, np.dtype, type]
-) -> Union[str, np.dtype]:
+def _normalize_dtype_per_layer(dtype_per_layer: DTypeLike) -> DTypeLike:
     try:
         dtype_per_layer = str(np.dtype(dtype_per_layer))
     except Exception:
@@ -103,7 +104,7 @@
 
 
 def _dtype_per_layer_to_dtype_per_channel(
-    dtype_per_layer: Union[str, np.dtype], num_channels: int
+    dtype_per_layer: DTypeLike, num_channels: int
 ) -> np.dtype:
     try:
         return np.dtype(
@@ -118,7 +119,7 @@
 
 
 def _dtype_per_channel_to_dtype_per_layer(
-    dtype_per_channel: Union[str, np.dtype], num_channels: int
+    dtype_per_channel: DTypeLike, num_channels: int
 ) -> str:
     return _convert_dtypes(
         np.dtype(dtype_per_channel),
@@ -128,7 +129,7 @@
 
 
 def _dtype_per_channel_to_element_class(
-    dtype_per_channel: Union[str, np.dtype], num_channels: int
+    dtype_per_channel: DTypeLike, num_channels: int
 ) -> str:
     dtype_per_layer = _dtype_per_channel_to_dtype_per_layer(
         dtype_per_channel, num_channels
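
The `DTypeLike` alias comes from `numpy.typing` when available and falls back to a `Union` on older numpy versions that lack that module, so the usual dtype spellings remain valid for all the annotated helpers. A minimal sketch of the pattern, assuming nothing beyond numpy itself:

```python
from typing import Union

import numpy as np

try:
    from numpy.typing import DTypeLike
except ImportError:  # older numpy without the numpy.typing module
    DTypeLike = Union[str, np.dtype, type]  # type: ignore[misc]

# All of these spellings are DTypeLike and normalize to the same np.dtype.
for spelling in ("uint32", np.uint32, np.dtype("uint32")):
    assert np.dtype(spelling) == np.dtype("uint32")
```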
