Description details¶

In [4]:

root = Path().home() / "Desktop/demo"
!ls {root}

README.md    __pycache__   cover.png   model.py   model_teaser.py   rdf.yaml
sample_input.tif   sample_output.tif   test_input.npy   test_output.npy   weights.pt   weights_torchscript.pt

In [5]:

show(root / "test_input.npy", root / "test_output.npy")

In [6]:

show(root / "cover.png")

Source code of the model architecture¶

In [7]:

from IPython.display import Code

Code(filename=root / "model_teaser.py", language='python')

Out[7]:

import torch.nn as nn


class HyLFM_Net(nn.Module):
    def __init__(
        self,
        *,
        z_out: int,
        nnum: int,
        kernel2d: int = 3,
        conv_per_block2d: int = 2,
        ...
    ):
        ...

    def forward(self, x):
        x = self.channel_from_lf(x)
        x = self.res2d(x)
        x = self.conv2d(x)
        x = self.c2z(x)
        x = self.res3d(x)
        x = self.conv3d(x)
        ...
        return x

Model Weights¶

In [8]:

pytorch_weights = torch.load(root / "weights.pt", weights_only=False)
pprint([(k, tuple(v.shape)) for k, v in pytorch_weights.items()][:4] + ["..."])

[('res2d.0.block.0.conv.weight', (768, 361, 3, 3)),
 ('res2d.0.block.0.conv.bias', (768,)),
 ('res2d.0.block.1.conv.weight', (768, 768, 3, 3)),
 ('res2d.0.block.1.conv.bias', (768,)),
 '...']
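
Note the 361 input channels of the first 2D convolution: this matches the channel_from_lf step in the teaser above, which rearranges the light field so that each of the nnum² pixels behind a microlens becomes its own channel. A quick illustrative check (not part of the original notebook):

nnum = 19
assert nnum**2 == 361  # one channel per light-field pixel behind each lenslet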

Model Input¶

In [ ]:

input_descr = InputTensorDescr(
    id=TensorId("lf"),
    axes=[
        BatchAxis(),
        ChannelAxis(channel_names=[Identifier("lf")]),
        SpaceInputAxis(
            id=AxisId("y"),
            size=ParameterizedSize(min=190, step=19),
            scale=4,
            concatenable=False,
        ),
        SpaceInputAxis(
            id=AxisId("x"),
            size=ParameterizedSize(min=190, step=19),
            scale=4,
            concatenable=False,
        ),
    ],
    test_tensor=FileDescr(source=root / "test_input.npy"),
    sample_tensor=FileDescr(source=root / "sample_input.tif"),
    data=IntervalOrRatioDataDescr(type="float32"),
    preprocessing=[
        ScaleRangeDescr(
            kwargs=ScaleRangeKwargs(
                axes=(AxisId("y"), AxisId("x")),
                max_percentile=99.8,
                min_percentile=5.0,
            )
        )
    ],
)
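
The ParameterizedSize entries declare that any spatial extent of the form min + n * step (n = 0, 1, 2, ...) is a valid input size; the validation at the end of this notebook exercises n = 0 through 2. A minimal sketch of that rule with the numbers from the descriptor above (the helper name is illustrative):

def valid_input_sizes(n_max: int, min_size: int = 190, step: int = 19) -> list:
    # every extent min_size + n * step is a valid input size for this axis
    return [min_size + n * step for n in range(n_max + 1)]

valid_input_sizes(3)  # [190, 209, 228, 247]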

Model Output¶

In [ ]:

from bioimageio.spec.model.v0_5 import OutputTensorDescr, SizeReference, SpaceOutputAxis

output_descr = OutputTensorDescr(
    id=TensorId("prediction"),
    description="predicted volume of fluorescence signal",
    axes=[
        BatchAxis(),
        ChannelAxis(channel_names=[Identifier("prediction")]),
        SpaceOutputAxis(id=AxisId("z"), size=49),
        SpaceOutputAxis(
            id=AxisId("y"),
            scale=19,
            size=SizeReference(tensor_id=TensorId("lf"), axis_id=AxisId("y"), offset=-16),
        ),
        SpaceOutputAxis(
            id=AxisId("x"),
            scale=19,
            size=SizeReference(tensor_id=TensorId("lf"), axis_id=AxisId("x"), offset=-16),
        ),
    ],
    test_tensor=FileDescr(source=root / "test_output.npy"),
    sample_tensor=FileDescr(source=root / "sample_output.tif"),
    data=IntervalOrRatioDataDescr(type="float32"),
)
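
Each SizeReference ties an output extent to the corresponding input axis. Per the v0.5 spec, the referenced size is rescaled by the ratio of the two scale values before the offset is applied: size = ref_size * ref_scale / own_scale + offset. A sketch with the values above (the helper name is illustrative):

def resolve_output_size(ref_size: int, ref_scale: int = 4, own_scale: int = 19, offset: int = -16) -> int:
    # e.g. the minimal input of 190 px maps to 190 * 4 // 19 - 16 == 24 px
    return ref_size * ref_scale // own_scale + offset

resolve_output_size(190)  # 24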

Model Architecture¶

In [ ]:

from bioimageio.spec.model.v0_5 import (
    ArchitectureFromFileDescr,
    Version,
)

pytorch_version = Version(torch.__version__)

pytorch_architecture = ArchitectureFromFileDescr(
    source=root / "model.py",
    callable=Identifier("HyLFM_Net"),
    kwargs=dict(
        c_in_3d=64,
        last_kernel2d=5,
        nnum=19,
        z_out=49,
    ),
)
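
It can be worth verifying that these kwargs and the weights actually fit together before wiring everything into the model description. A sketch, assuming model.py from the listing above and the pytorch_weights state dict loaded earlier:

import importlib.util

_spec = importlib.util.spec_from_file_location("hylfm_model", root / "model.py")
_module = importlib.util.module_from_spec(_spec)
_spec.loader.exec_module(_module)

net = _module.HyLFM_Net(c_in_3d=64, last_kernel2d=5, nnum=19, z_out=49)
net.load_state_dict(pytorch_weights)  # raises if keys or shapes mismatch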

Create a model description¶

In [ ]:

my_model_descr = ModelDescr(
    name="HyLFM-Net-stat",
    description=(
        "HyLFM-Net trained on static images of arrested medaka hatchling hearts. "
        + "The network reconstructs a volumetric image from a given light field."
    ),
    covers=[root / "cover.png"],
    authors=[
        Author(
            name="Fynn Beuttenmueller",
            affiliation="EMBL Heidelberg",
            github_user="fynnbe",
            orcid=OrcidId("0000-0002-8567-6389"),
        )
    ],
    cite=[
        CiteEntry(
            text=(
                "Wagner, N., Beuttenmueller, F., Norlin, N. et al."
                + " Deep learning-enhanced light-field imaging with continuous validation."
                + " Nat Methods 18, 557–563 (2021)."
            ),
            doi=Doi("10.1038/s41592-021-01136-0"),
        )
    ],
    license=LicenseId("MIT"),
    ...

In [ ]:

    ...
    documentation=root / "README.md",
    git_repo=HttpUrl("https://github.com/kreshuklab/hylfm-net"),
    tags=[
        "light-field-microscopy",
        "pytorch",
        "fluorescence-light-microscopy",
        "image-reconstruction",
        "nuclei",
        "hylfm",
    ],
    training_data=LinkedDataset(id=DatasetId("uplifting-ice-cream")),
    inputs=[input_descr],
    outputs=[output_descr],
    weights=WeightsDescr(
        pytorch_state_dict=PytorchStateDictWeightsDescr(
            source=root / "weights.pt",
            architecture=pytorch_architecture,
            pytorch_version=pytorch_version,
        ),
        torchscript=TorchscriptWeightsDescr(
            source=root / "weights_torchscript.pt",
            pytorch_version=pytorch_version,
            parent="pytorch_state_dict",  # these weights were converted from the pytorch_state_dict weights
        ),
    ),
    attachments=[FileDescr(source=root / "model_teaser.py")],
)

Test a described model¶

In [ ]:

from bioimageio.core import test_model

validation_summary = test_model(my_model_descr)
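
The display() call below renders the summary; the result can also be checked programmatically, e.g. in CI. A minimal sketch, assuming the summary object's status attribute:

assert validation_summary.status == "passed"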

In [14]:

validation_summary.display()

✔️  bioimageio validation passed

source            in-memory
format version    model 0.5.3
bioimageio.spec   0.5.3.3
bioimageio.core   0.6.10

❓    location    detail
✔️                initialized ModelDescr to describe model 0.5.3
✔️                Has expected resource type
✔️                Reproduce test outputs from test inputs (pytorch_state_dict)
✔️                Run pytorch_state_dict inference for inputs with batch_size: 1 and size parameter n: 0
✔️                Run pytorch_state_dict inference for inputs with batch_size: 2 and size parameter n: 0
✔️                Run pytorch_state_dict inference for inputs with batch_size: 1 and size parameter n: 1
✔️                Run pytorch_state_dict inference for inputs with batch_size: 2 and size parameter n: 1
✔️                Run pytorch_state_dict inference for inputs with batch_size: 1 and size parameter n: 2
✔️                Run pytorch_state_dict inference for inputs with batch_size: 2 and size parameter n: 2
✔️                Reproduce test outputs from test inputs (torchscript)
✔️                Run torchscript inference for inputs with batch_size: 1 and size parameter n: 0
✔️                Run torchscript inference for inputs with batch_size: 2 and size parameter n: 0
✔️                Run torchscript inference for inputs with batch_size: 1 and size parameter n: 1
✔️                Run torchscript inference for inputs with batch_size: 2 and size parameter n: 1
✔️                Run torchscript inference for inputs with batch_size: 1 and size parameter n: 2
✔️                Run torchscript inference for inputs with batch_size: 2 and size parameter n: 2
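
With validation passing, the description can be bundled into a shareable archive. A sketch using bioimageio.spec's save_bioimageio_package; the output filename here is illustrative:

from bioimageio.spec import save_bioimageio_package

save_bioimageio_package(my_model_descr, output_path=root / "hylfm-net-stat.zip")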