
Commit e72950c

hotzenklotz, jstriebel, and philippotto authored
Refine Examples/Demos (#486)
* updated example with more comments etc
* fix formatting
* fixed formatting, add more comments
* added link to github to docs
* fixed more tests
* added pooch to poetry lockfile
* fix tests
* Apply suggestions from code review

Co-authored-by: Philipp Otto <[email protected]>
Co-authored-by: Jonathan Striebel <[email protected]>
1 parent 8cf330c commit e72950c

File tree: 11 files changed, +214561 −4410 lines

.gitignore

Lines changed: 7 additions & 1 deletion
@@ -116,4 +116,10 @@ venv.bak/
 .DS_Store
 .docker_credentials
 *.simg
-slurm-*.out
+slurm-*.out
+
+# webknossos-libs examples temp output
+webknossos/testoutput/*
+*/*/output.nml
+*/cell_*/*
+cell_*/*

docs/src/webknossos-py/examples/learned_segmenter.md

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ This example trains a segmenter from a volume annotation and applies it to the w
 It builds upon the two previous examples using the [Dataset API](dataset_usage.md) and [dataset upload](upload_image_data.md).
 Additionally, it downloads [this manual volume annotation of a subset of the skin example dataset](https://webknossos.org/annotations/Explorational/616457c2010000870032ced4) which is used for training.
 
-*This example additionally needs the scikit-learn package.*
+*This example additionally needs the scikit-learn and pooch packages.*
 
 ```python
 --8<--
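
The pooch package mentioned in the updated note is a small helper library for downloading and caching files; it is also what scikit-image uses for its downloadable sample data. Below is a minimal sketch of such a fetch-and-cache step; the URL is a placeholder and not part of this commit:

```python
# Sketch only: pooch downloads a file once and reuses the cached copy afterwards.
# The URL is a placeholder; known_hash=None skips checksum verification.
import pooch

local_path = pooch.retrieve(
    url="https://example.org/sample-volume.tif",
    known_hash=None,
)
print(local_path)  # path to the cached file on disk
```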

docs/src/webknossos-py/index.md

Lines changed: 3 additions & 0 deletions
@@ -21,6 +21,9 @@ To get started, check out the [installation instructions](installation.md).
 - Interaction, connection & scripting with your webKnossos instance over the REST API
 - Up- & downloading annotations and datasets
 
+## Source Code
+
+The `webknossos` Python package is [open-source on GitHub](https://github.com/scalableminds/webknossos-libs). Feel free to report bugs there or open pull requests with your features and fixes.
 
 ## License
 [AGPLv3](https://www.gnu.org/licenses/agpl-3.0.html)

webknossos/examples/learned_segmenter.py

Lines changed: 16 additions & 4 deletions
@@ -11,17 +11,21 @@
 
 
 def main() -> None:
+    # We are going to use a public demo annotation for this example
+
     annotation = wk.open_annotation(
         "https://webknossos.org/annotations/Explorational/616457c2010000870032ced4"
     )
+
+    # Step 1: Download the dataset and our training data annotation from webKnossos to our local computer
     training_data_bbox = wk.BoundingBox.from_tuple6(
         annotation.skeleton.user_bounding_boxes[0]
     )
     time_str = strftime("%Y-%m-%d_%H-%M-%S", gmtime())
     new_dataset_name = annotation.dataset_name + f"_segmented_{time_str}"
     dataset = wk.download_dataset(
         annotation.dataset_name,
-        "scalable_minds",
+        organization_name="scalable_minds",
         path=new_dataset_name,
     )
     dataset.name = new_dataset_name
@@ -32,37 +36,45 @@ def main() -> None:
     mag = wk.Mag(1)
     mag_view = dataset.layers["color"].mags[mag]
 
+    # Step 2: Initialize a machine learning model to segment our dataset
     features_func = partial(
         feature.multiscale_basic_features, multichannel=True, edges=False
     )
     segmenter = TrainableSegmenter(features_func=features_func)
 
+    # Step 3: Manipulate our data to fit the ML model and start training on
+    # data from our annotated training data bounding box
     print("Starting training…")
     img_data_train = mag_view.read(
         training_data_bbox.in_mag(mag).topleft, training_data_bbox.in_mag(mag).size
-    )
-    # move channels to last dimension, remove z dimension
+    )  # wk data has dimensions (Channels, X, Y, Z)
+    # move channels to last dimension, remove z dimension to match skimage's shape
     X_train = np.moveaxis(np.squeeze(img_data_train), 0, -1)
     Y_train = np.squeeze(volume_annotation.mags[mag].get_view().read())
+
     segmenter.fit(X_train, Y_train)
 
+    # Step 4: Use our trained model and predict a class for each pixel in the dataset
+    # to get a full segmentation of the data
     print("Starting prediction…")
     X_predict = np.moveaxis(np.squeeze(mag_view.read()), 0, -1)
     Y_predicted = segmenter.predict(X_predict)
     segmentation = Y_predicted[:, :, None]  # adds z dimension
     assert segmentation.max() < 256
     segmentation = segmentation.astype("uint8")
 
+    # Step 5: Bundle everything into a webKnossos layer and upload to wK for viewing and further work
     segmentation_layer = dataset.add_layer(
         "segmentation",
-        "segmentation",
+        wk.SEGMENTATION_CATEGORY,
         segmentation.dtype,
         compressed=True,
         largest_segment_id=int(segmentation.max()),
     )
     segmentation_layer.bounding_box = dataset.layers["color"].bounding_box
     segmentation_layer.add_mag(mag, compress=True).write(segmentation)
 
+    # Get your auth token from https://webknossos.org/auth/token
     with wk.webknossos_context(url="http://localhost:9000", token="secretScmBoyToken"):
         url = dataset.upload()
         print(f"Successfully uploaded {url}")

webknossos/examples/skeleton_synapse_candidates.py

Lines changed: 8 additions & 3 deletions
@@ -1,16 +1,22 @@
 """
+Example application:
+Finding synapse candidates with a threshold in a skeleton
+annotation where each neuron is represented/reconstructed as one long tree
+of many nodes placed regularly along its axon/dendrite paths.
+
+Method:
 Load an NML file and consider all pairs of trees.
 For each tree pair, find the node pairs that have a distance
 lower than a given threshold.
-For these candidates (e.g. synapse candidates with meaningful input data),
-new graphs are created which contain a node at the
+For these candidates, new annotations are created which contain a node at the
 center position between the input nodes.
 """
 
 from itertools import combinations
 from typing import Iterator, Tuple
 
 import numpy as np
+from scipy.spatial import cKDTree
 
 import webknossos as wk
 
@@ -20,7 +26,6 @@ def pairs_within_distance(
     pos_b: np.ndarray,
     max_distance: float,
 ) -> Iterator[Tuple[np.ndarray, np.ndarray]]:
-    from scipy.spatial import cKDTree
 
     pos_a_kdtree = cKDTree(pos_a)
     pos_b_kdtree = cKDTree(pos_b)
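
The expanded docstring describes the approach: compare every pair of trees and match up nodes that lie within a distance threshold, which is what the now top-level cKDTree import supports. Below is a self-contained sketch of one way to do that pairing with two kd-trees; the node coordinates are made up, and the exact cKDTree query used inside pairs_within_distance is not visible in this hunk:

```python
import numpy as np
from scipy.spatial import cKDTree

# Made-up node positions for two trees, one row per node (x, y, z).
pos_a = np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0]])
pos_b = np.array([[1.0, 0.0, 0.0], [50.0, 0.0, 0.0]])
max_distance = 3.0

# For every node of tree A, collect the nodes of tree B within max_distance.
matches = cKDTree(pos_a).query_ball_tree(cKDTree(pos_b), r=max_distance)

for idx_a, indices_b in enumerate(matches):
    for idx_b in indices_b:
        # A synapse candidate sits at the center between the two matched nodes.
        center = (pos_a[idx_a] + pos_b[idx_b]) / 2
        print(idx_a, idx_b, center)  # prints: 0 0 [0.5 0. 0.]
```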

webknossos/examples/upload_image_data.py

Lines changed: 30 additions & 8 deletions
@@ -1,24 +1,46 @@
 from time import gmtime, strftime
 
+import numpy as np
 from skimage import data
 
 import webknossos as wk
+from webknossos.dataset import COLOR_CATEGORY
 
 
 def main() -> None:
     with wk.webknossos_context(url="http://localhost:9000", token="secretScmBoyToken"):
-        img = data.cell()
+        # load your data - we use an example 3D dataset here
+        img = data.cells3d()  # (z, c, y, x)
+
+        # make sure that the dimensions of your data are in the right order
+        # we expect the following dimensions: Channels, X, Y, Z.
+        img = np.transpose(img, [1, 3, 2, 0])
+
+        # choose a name for our dataset
         time_str = strftime("%Y-%m-%d_%H-%M-%S", gmtime())
         name = f"cell_{time_str}"
-        ds = wk.Dataset.create(name, scale=(107, 107, 107))
-        layer = ds.add_layer(
-            "color",
-            "color",
+
+        # scale is defined in nm
+        ds = wk.Dataset.create(name, scale=(260, 260, 290))
+
+        # The example microscopy data has two channels
+        # Channel 0 contains cell membranes, channel 1 contains nuclei.
+        layer_membranes = ds.add_layer(
+            "cell membranes",
+            COLOR_CATEGORY,
+            dtype_per_layer=img.dtype,
+        )
+
+        layer_membranes.add_mag(1, compress=True).write(img[0, :])
+
+        layer_nuclei = ds.add_layer(
+            "nuclei",
+            COLOR_CATEGORY,
             dtype_per_layer=img.dtype,
         )
-        # add channel and z dimensions and put X before Y,
-        # resulting dimensions are C, X, Y, Z.
-        layer.add_mag(1, compress=True).write(img.T[None, :, :, None])
+
+        layer_nuclei.add_mag(1, compress=True).write(img[1, :])
+
         url = ds.upload()
         print(f"Successfully uploaded {url}")
 
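
The rewritten example expects volume data in the axis order (Channels, X, Y, Z), whereas scikit-image's cells3d() returns (z, c, y, x), hence the np.transpose call. A quick standalone sketch of that axis reordering on a dummy array; the sizes are arbitrary and only chosen so each axis is distinguishable:

```python
import numpy as np

# Dummy volume in the (z, c, y, x) layout that skimage's cells3d() uses.
img = np.zeros((60, 2, 256, 128), dtype="uint16")

# Reorder to (Channels, X, Y, Z) as the upload example expects.
img = np.transpose(img, [1, 3, 2, 0])
print(img.shape)  # (2, 128, 256, 60)

# img[0, :] and img[1, :] are then single-channel (X, Y, Z) volumes,
# each of which the example writes to its own webKnossos layer.
print(img[0, :].shape)  # (128, 256, 60)
```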

0 commit comments
