Refine Examples/Demos (#486)
* updated example with more comments etc

* fix formatting

* fixed formatting, add more comments

* added link to github to docs

* fixed more tests

* added pooch to poetry lockfile

* fix tests

* Apply suggestions from code review

Co-authored-by: Philipp Otto <[email protected]>

Co-authored-by: Jonathan Striebel <[email protected]>
Co-authored-by: Jonathan Striebel <[email protected]>
Co-authored-by: Philipp Otto <[email protected]>
4 people authored Dec 8, 2021
1 parent 8cf330c commit e72950c
Showing 11 changed files with 214,561 additions and 4,410 deletions.
8 changes: 7 additions & 1 deletion .gitignore
@@ -116,4 +116,10 @@ venv.bak/
.DS_Store
.docker_credentials
*.simg
slurm-*.out
slurm-*.out

# webknossos-libs examples temp output
webknossos/testoutput/*
*/*/output.nml
*/cell_*/*
cell_*/*
2 changes: 1 addition & 1 deletion docs/src/webknossos-py/examples/learned_segmenter.md
@@ -14,7 +14,7 @@ This example trains a segmenter from a volume annotation and applies it to the w
It builds upon the two previous examples using the [Dataset API](dataset_usage.md) and [dataset upload](upload_image_data.md).
Additionally, it downloads [this manual volume annotation of a subset of the skin example dataset](https://webknossos.org/annotations/Explorational/616457c2010000870032ced4) which is used for training.

*This example additionally needs the scikit-learn package.*
*This example additionally needs the scikit-learn and pooch packages.*
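
A quick optional sanity check before running the snippet below; the `pip` command in the comment and the check itself are suggestions, not part of the example:

```python
# Optional: verify the extra dependencies are installed,
# e.g. via `pip install scikit-learn pooch`.
import importlib.util

for package in ("sklearn", "pooch"):  # scikit-learn is imported as "sklearn"
    if importlib.util.find_spec(package) is None:
        raise ImportError(f"Missing optional dependency: {package}")
```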

```python
--8<--
3 changes: 3 additions & 0 deletions docs/src/webknossos-py/index.md
@@ -21,6 +21,9 @@ To get started, check out the [installation instructions](installation.md).
- Interaction, connection & scripting with your webKnossos instance over the REST API
- Up- & downloading annotations and datasets

## Source Code

The `webknossos` Python package is [open-source on GitHub](https://github.com/scalableminds/webknossos-libs). Feel free to report bugs there or open pull requests with your features and fixes.

## License
[AGPLv3](https://www.gnu.org/licenses/agpl-3.0.html)
20 changes: 16 additions & 4 deletions webknossos/examples/learned_segmenter.py
@@ -11,17 +11,21 @@


def main() -> None:
# We are going to use a public demo annotation for this example

annotation = wk.open_annotation(
"https://webknossos.org/annotations/Explorational/616457c2010000870032ced4"
)

# Step 1: Download the dataset and our training data annotation from webKnossos to our local computer
training_data_bbox = wk.BoundingBox.from_tuple6(
annotation.skeleton.user_bounding_boxes[0]
)
time_str = strftime("%Y-%m-%d_%H-%M-%S", gmtime())
new_dataset_name = annotation.dataset_name + f"_segmented_{time_str}"
dataset = wk.download_dataset(
annotation.dataset_name,
"scalable_minds",
organization_name="scalable_minds",
path=new_dataset_name,
)
dataset.name = new_dataset_name
@@ -32,37 +36,45 @@ def main() -> None:
mag = wk.Mag(1)
mag_view = dataset.layers["color"].mags[mag]

# Step 2: Initialize a machine learning model to segment our dataset
features_func = partial(
feature.multiscale_basic_features, multichannel=True, edges=False
)
segmenter = TrainableSegmenter(features_func=features_func)

# Step 3: Manipulate our data to fit the ML model and start training on
# data from our annotated training data bounding box
print("Starting training…")
img_data_train = mag_view.read(
training_data_bbox.in_mag(mag).topleft, training_data_bbox.in_mag(mag).size
)
# move channels to last dimension, remove z dimension
) # wk data has dimensions (Channels, X, Y, Z)
# move channels to last dimension, remove z dimension to match skimage's shape
X_train = np.moveaxis(np.squeeze(img_data_train), 0, -1)
Y_train = np.squeeze(volume_annotation.mags[mag].get_view().read())

segmenter.fit(X_train, Y_train)

# Step 4: Use our trained model and predict a class for each pixel in the dataset
# to get a full segmentation of the data
print("Starting prediction…")
X_predict = np.moveaxis(np.squeeze(mag_view.read()), 0, -1)
Y_predicted = segmenter.predict(X_predict)
segmentation = Y_predicted[:, :, None] # adds z dimension
assert segmentation.max() < 256
segmentation = segmentation.astype("uint8")

# Step 5: Bundle everything into a webKnossos layer and upload it to webKnossos for viewing and further work
segmentation_layer = dataset.add_layer(
"segmentation",
"segmentation",
wk.SEGMENTATION_CATEGORY,
segmentation.dtype,
compressed=True,
largest_segment_id=int(segmentation.max()),
)
segmentation_layer.bounding_box = dataset.layers["color"].bounding_box
segmentation_layer.add_mag(mag, compress=True).write(segmentation)

# Get your auth token from https://webknossos.org/auth/token
with wk.webknossos_context(url="http://localhost:9000", token="secretScmBoyToken"):
url = dataset.upload()
print(f"Successfully uploaded {url}")
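
To make the axis handling in steps 3 and 4 above easier to follow in isolation, here is a minimal sketch; the 3-channel (3, 256, 256, 1) cutout shape is assumed for illustration and is not necessarily what the skin dataset returns:

```python
import numpy as np

# Assumed cutout shape (Channels, X, Y, Z) as returned by mag_view.read().
img_wk = np.zeros((3, 256, 256, 1), dtype="uint8")

# scikit-image expects a 2D multichannel image of shape (X, Y, Channels):
# squeeze drops the singleton Z axis, moveaxis puts channels last.
img_skimage = np.moveaxis(np.squeeze(img_wk), 0, -1)
assert img_skimage.shape == (256, 256, 3)

# The per-pixel prediction comes back as (X, Y); webKnossos needs the Z axis back.
prediction = np.zeros((256, 256), dtype="uint8")
segmentation = prediction[:, :, None]
assert segmentation.shape == (256, 256, 1)
```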
11 changes: 8 additions & 3 deletions webknossos/examples/skeleton_synapse_candidates.py
@@ -1,16 +1,22 @@
"""
Example application:
Finding synapse candidates with a threshold in a skeleton
annotation where each neuron is represented/reconstructed as one long tree
of many nodes placed regularly along its axon/dendrite paths.
Method:
Load an NML file and consider all pairs of trees.
For each tree pair, find the node pairs that have a distance
lower than a given threshold.
For these candidates (e.g. synapse candidates with meaningful input data),
new graphs are created which contain a node at the
For these candidates, new annotations are created which contain a node at the
center position between the input nodes.
"""

from itertools import combinations
from typing import Iterator, Tuple

import numpy as np
from scipy.spatial import cKDTree

import webknossos as wk

@@ -20,7 +26,6 @@ def pairs_within_distance(
pos_b: np.ndarray,
max_distance: float,
) -> Iterator[Tuple[np.ndarray, np.ndarray]]:
from scipy.spatial import cKDTree

pos_a_kdtree = cKDTree(pos_a)
pos_b_kdtree = cKDTree(pos_b)
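
The docstring above describes the pair search, but the function body is collapsed in this view. Here is a minimal standalone sketch of how such a query could look with SciPy's cKDTree; the function name and the random test coordinates are hypothetical and not the example's actual implementation:

```python
import numpy as np
from scipy.spatial import cKDTree

def pairs_within_distance_sketch(pos_a, pos_b, max_distance):
    # One k-d tree per node set; query_ball_tree returns, for every point
    # in tree_a, the indices of all tree_b points within max_distance.
    tree_a = cKDTree(pos_a)
    tree_b = cKDTree(pos_b)
    for idx_a, neighbors in enumerate(tree_a.query_ball_tree(tree_b, max_distance)):
        for idx_b in neighbors:
            yield pos_a[idx_a], pos_b[idx_b]

# Hypothetical usage with random coordinates:
nodes_a = np.random.rand(200, 3) * 1000
nodes_b = np.random.rand(200, 3) * 1000
for node_a, node_b in pairs_within_distance_sketch(nodes_a, nodes_b, 30.0):
    candidate_position = (node_a + node_b) / 2  # node at the center between the pair
```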
38 changes: 30 additions & 8 deletions webknossos/examples/upload_image_data.py
@@ -1,24 +1,46 @@
from time import gmtime, strftime

import numpy as np
from skimage import data

import webknossos as wk
from webknossos.dataset import COLOR_CATEGORY


def main() -> None:
with wk.webknossos_context(url="http://localhost:9000", token="secretScmBoyToken"):
img = data.cell()
# load your data - we use an example 3D dataset here
img = data.cells3d() # (z, c, y, x)

# make sure that the dimensions of your data are in the right order
# we expect the following dimensions: Channels, X, Y, Z.
img = np.transpose(img, [1, 3, 2, 0])

# choose a name for our dataset
time_str = strftime("%Y-%m-%d_%H-%M-%S", gmtime())
name = f"cell_{time_str}"
ds = wk.Dataset.create(name, scale=(107, 107, 107))
layer = ds.add_layer(
"color",
"color",

# scale is defined in nm
ds = wk.Dataset.create(name, scale=(260, 260, 290))

# The example microscopy data has two channels
# Channel 0 contains cell membranes, channel 1 contains nuclei.
layer_membranes = ds.add_layer(
"cell membranes",
COLOR_CATEGORY,
dtype_per_layer=img.dtype,
)

layer_membranes.add_mag(1, compress=True).write(img[0, :])

layer_nuclei = ds.add_layer(
"nuclei",
COLOR_CATEGORY,
dtype_per_layer=img.dtype,
)
# add channel and z dimensions and put X before Y,
# resulting dimensions are C, X, Y, Z.
layer.add_mag(1, compress=True).write(img.T[None, :, :, None])

layer_nuclei.add_mag(1, compress=True).write(img[1, :])

url = ds.upload()
print(f"Successfully uploaded {url}")

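
For reference, a minimal sketch of the axis reordering this example performs; the printed shapes assume scikit-image's current cells3d sample data (which itself needs pooch to download):

```python
import numpy as np
from skimage import data

# skimage's cells3d() is (z, c, y, x); webKnossos layers expect (c, x, y, z).
img = data.cells3d()
print(img.shape)  # (60, 2, 256, 256)

img_wk = np.transpose(img, [1, 3, 2, 0])
print(img_wk.shape)  # (2, 256, 256, 60)

# Each channel becomes its own color layer, written as (x, y, z) data:
membranes, nuclei = img_wk[0], img_wk[1]
print(membranes.shape, nuclei.shape)  # (256, 256, 60) (256, 256, 60)
```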