diff --git a/dev/env.yaml b/dev/env.yaml
index 391c042d..ae7960ff 100644
--- a/dev/env.yaml
+++ b/dev/env.yaml
@@ -1,7 +1,6 @@
 name: core
 channels:
   - conda-forge
-  - defaults
 dependencies:
   - bioimageio.spec>=0.5.3
   - black
@@ -11,6 +10,8 @@ dependencies:
   - imageio>=2.5
   - jupyter
   - jupyter-black
+  - ipykernel
+  - matplotlib
   - keras>=3.0
   - loguru
   - numpy
diff --git a/example/model_creation.ipynb.needs_update b/example/model_creation.ipynb.needs_update
deleted file mode 100644
index e45714e7..00000000
--- a/example/model_creation.ipynb.needs_update
+++ /dev/null
@@ -1,502 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "id": "9943dc62",
-   "metadata": {},
-   "source": [
-    "# Create a bioimage.io model package\n",
-    "\n",
-    "`bioimageio.core` implements functionality to create a zipped bioimage.io model. This zip can then be run in software that supports the bioimage.io specification and can be uploaded to [bioimage.io](https://bioimage.io/#/) in order to make it publicly available. For details on how to upload the model, see [these instructions](https://bioimage.io/docs/#/contribute_models/README).\n",
-    "\n",
-    "Here, we will create three model packages:\n",
-    "- a toy model with torchscript weights\n",
-    "- a model based on another bioimage.io model where we add a post-processing step\n",
-    "- another version of this model where we add a new weight format to it compatible with deepImageJ"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "72a590dc",
-   "metadata": {},
-   "source": [
-    "## Create a bioimage.io package for a new model\n",
-    "\n",
-    "First, we create a zipped model package for a new model. Here, we use a very simple pytorch model and export it to torchscript. In practice you would use the weights for your trained model here. \n",
-    "\n",
-    "The procedure for creating a model with another weight format, e.g. keras, tensorflow or onnx, would be very similar."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "07ef310c",
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "import hashlib\n",
-    "\n",
-    "# the imports for bioimage.io model export\n",
-    "import bioimageio.core\n",
-    "import numpy as np\n",
-    "import torch\n",
-    "import torch.nn as nn\n",
-    "from bioimageio.core.build_spec import build_model, add_weights"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "e5468931",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# create a temporary directory to store intermediate files\n",
-    "os.makedirs(\"my-model\", exist_ok=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "1df49c2e",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# a very simple pytorch model: just a few convolutions\n",
-    "model = nn.Sequential(\n",
-    "    nn.Conv2d(1, 16, 3),\n",
-    "    nn.Conv2d(16, 32, 3),\n",
-    "    nn.Conv2d(32, 16, 3),\n",
-    "    nn.Conv2d(16, 1, 1)\n",
-    ")\n",
-    "model = torch.jit.script(model)\n",
-    "\n",
-    "# save the model weights\n",
-    "model.save(\"my-model/weights.pt\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "095cc427",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# create test data for this model: an input image and an output image\n",
-    "# this data will be used for model test runs to ensure the model runs correctly and that the expected output can be reproduced\n",
-    "# NOTE: if you have pre-and-post-processing in your model (see the more advanced models for an example)\n",
-    "# you will need to save the input BEFORE preprocessing and the output AFTER postprocessing\n",
-    "\n",
-    "input_ = np.random.rand(1, 1, 128, 128).astype(\"float32\")  # an example input\n",
-    "np.save(\"my-model/test-input.npy\", input_)\n",
-    "with torch.no_grad():\n",
-    "    output = model(torch.from_numpy(input_)).numpy()\n",
-    "np.save(\"my-model/test-output.npy\", output)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "7835f868",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# create markdown documentation for your model\n",
-    "# this should describe how the model was trained, (and on which data)\n",
-    "# and also what to take into consideration when running the model, especially how to validate the model\n",
-    "# here, we just create a stub documentation\n",
-    "with open(\"my-model/doc.md\", \"w\") as f:\n",
-    "    f.write(\"# My First Model\\n\")\n",
-    "    f.write(\"This model was trained on a very big dataset.\\n\")\n",
-    "    f.write(\"You should not let it get wet or feed it after midnight.\\n\")\n",
-    "    f.write(\"To validate its predictins, make sure that it does not produce any evil clones.\\n\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "a9414c1b",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# now we can use the build_model function to create the zipped package.\n",
-    "# it takes the path to the weights and data we have just created, as well as additional information\n",
-    "# that will be used to add metadata to the rdf.yaml file in the model zip\n",
-    "# we only use a subset of the available options here, please refer to the advanced examples and to the\n",
-    "# function signature of build_model in order to get an overview of the full functionality\n",
-    "build_model(\n",
-    "    # the weight file and the type of the weights\n",
-    "    weight_uri=\"my-model/weights.pt\",\n",
-    "    weight_type=\"torchscript\",\n",
-    "    # the test input and output data as well as the description of the tensors\n",
-    "    # these are passed as list because we support multiple inputs / outputs per model\n",
-    "    test_inputs=[\"my-model/test-input.npy\"],\n",
-    "    test_outputs=[\"my-model/test-output.npy\"],\n",
-    "    input_axes=[\"bcyx\"],\n",
-    "    output_axes=[\"bcyx\"],\n",
-    "    # where to save the model zip, how to call the model and a short description of it\n",
-    "    output_path=\"my-model/model.zip\",\n",
-    "    name=\"MyFirstModel\",\n",
-    "    description=\"a fancy new model\",\n",
-    "    # additional metadata about authors, licenses, citation etc.\n",
-    "    authors=[{\"name\": \"Gizmo\"}],\n",
-    "    license=\"CC-BY-4.0\",\n",
-    "    documentation=\"my-model/doc.md\",\n",
-    "    tags=[\"nucleus-segmentation\"],  # the tags are used to make models more findable on the website\n",
-    "    cite=[{\"text\": \"Gizmo et al.\", \"doi\": \"doi:10.1002/xyzacab123\"}],\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "e0083598",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# finally, we test that the expected outptus are reproduced when running the model.\n",
-    "# the 'test_model' function runs this test.\n",
-    "# it will output a list of dictionaries. each dict gives the status of a different test that is being run\n",
-    "# if all of them contain \"status\": \"passed\" then all tests were successful\n",
-    "from bioimageio.core.resource_tests import test_model\n",
-    "my_model = bioimageio.core.load_resource_description(\"my-model/model.zip\")\n",
-    "test_model(my_model)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "cd73ffcd",
-   "metadata": {},
-   "source": [
-    "## Advanced: Modify an existing bioimage.io model\n",
-    "\n",
-    "Here, we modify an existing bioimage.io model by adding thresholding as a post-processing step to it.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "09574985",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# we use a model from the webiste, please refer to the model_usage notebook for\n",
-    "# more details about this model and the general usage of the bioimageio.core library\n",
-    "doi = \"10.5281/zenodo.6287342\"\n",
-    "model_resource = bioimageio.core.load_resource_description(doi)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "03eb5ab9",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# get the python file defining the architecture.\n",
-    "# this is only required for models with pytorch_state_dict weights\n",
-    "def get_architecture_source(rdf):\n",
-    "    # here, we need the raw resource, which contains the information from the resource description\n",
-    "    # before evaluation, e.g. the file and name of the python file with the model architecture\n",
-    "    raw_resource = bioimageio.core.load_raw_resource_description(rdf)\n",
-    "    # the python file defining the architecture for the pytorch weihgts\n",
-    "    model_source = raw_resource.weights[\"pytorch_state_dict\"].architecture\n",
-    "    # download the source file if necessary\n",
-    "    source_file = bioimageio.core.resource_io.utils.resolve_source(\n",
-    "        model_source.source_file\n",
-    "    )\n",
-    "    # if the source file path does not exist, try combining it with the root path of the model\n",
-    "    if not os.path.exists(source_file):\n",
-    "        source_file = os.path.join(raw_resource.root_path, os.path.split(source_file)[1])\n",
-    "    assert os.path.exists(source_file), source_file\n",
-    "    class_name = model_source.callable_name\n",
-    "    return f\"{source_file}:{class_name}\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "c9ae3edc",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import xarray as xr\n",
-    "# we run prediction with the model once in order to get test outputs\n",
-    "input_image = np.load(model_resource.test_inputs[0])\n",
-    "input_array = xr.DataArray(input_image, dims=tuple(model_resource.inputs[0].axes))\n",
-    "\n",
-    "with bioimageio.core.create_prediction_pipeline(model_resource) as prediction_pipeline:\n",
-    "    prediction = prediction_pipeline(input_array)[0]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "72a09243",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# create a subfolder to store the files for the new model\n",
-    "model_root = \"./new_model\"\n",
-    "os.makedirs(model_root, exist_ok=True)\n",
-    "\n",
-    "# create the expected output tensor (= outputs thresholded at 0.5)\n",
-    "threshold = 0.5\n",
-    "new_output = prediction > threshold\n",
-    "new_output_path = f\"{model_root}/new_test_output.npy\"\n",
-    "np.save(new_output_path, new_output)\n",
-    "\n",
-    "# add thresholding as post-processing procedure to our model\n",
-    "preprocessing = [[{\"name\": prep.name, \"kwargs\": prep.kwargs} for prep in inp.preprocessing] for inp in model_resource.inputs]\n",
-    "postprocessing = [[{\"name\": \"binarize\", \"kwargs\": {\"threshold\": threshold}}]]\n",
-    "\n",
-    "# get the model architecture\n",
-    "# note that this is only necessary for pytorch state dict models\n",
-    "model_source = get_architecture_source(doi)\n",
-    "\n",
-    "# we use the `parent` field to indicate that the new model is created based on\n",
-    "# the nucleus segmentation model we have obtained from bioimage.io\n",
-    "# this field is optional and only needs to be given for models that are created based on other models from bioimage.io\n",
-    "# the parent is specified via it's doi and the hash of its rdf file\n",
-    "model_root_folder = os.path.split(model_resource.weights[\"pytorch_state_dict\"].source)[0]\n",
-    "rdf_file = os.path.join(model_root_folder, \"rdf.yaml\")\n",
-    "with open(rdf_file, \"rb\") as f:\n",
-    "    rdf_hash = hashlib.sha256(f.read()).hexdigest()\n",
-    "parent = {\"uri\": doi, \"sha256\": rdf_hash}\n",
-    "\n",
-    "# the name of the new model and where to save the zipped model package\n",
-    "name = \"new-model1\"\n",
-    "zip_path = os.path.join(model_root, f\"{name}.zip\")\n",
-    "\n",
-    "# `build_model` needs some additional information about the model, like citation information\n",
-    "# all this additional information is passed as plain python types and will be converted into the bioimageio representation internally\n",
-    "# for more informantion, check out the function signature\n",
-    "# https://github.com/bioimage-io/core-bioimage-io-python/blob/main/bioimageio/core/build_spec/build_model.py#L252\n",
-    "cite = [{\"text\": cite_entry.text, \"url\": cite_entry.url} for cite_entry in model_resource.cite]\n",
-    "\n",
-    "# the training data used for the model can also be specified by linking to a dataset available on bioimage.io\n",
-    "training_data = {\"id\": \"ilastik/stradist_dsb_training_data\"}\n",
-    "\n",
-    "# the axes descriptions for the inputs / outputs\n",
-    "input_axes = [\"bcyx\"]\n",
-    "output_axes = [\"bcyx\"]\n",
-    "\n",
-    "# the pytorch_state_dict weight file\n",
-    "weight_file = model_resource.weights[\"pytorch_state_dict\"].source\n",
-    "\n",
-    "# the path to save the new model with torchscript weights\n",
-    "zip_path = f\"{model_root}/new_model2.zip\"\n",
-    "\n",
-    "# build the model! it will be saved to 'zip_path'\n",
-    "new_model_raw = build_model(\n",
-    "    weight_uri=weight_file,\n",
-    "    test_inputs=model_resource.test_inputs,\n",
-    "    test_outputs=[new_output_path],\n",
-    "    input_axes=input_axes,\n",
-    "    output_axes=output_axes,\n",
-    "    output_path=zip_path,\n",
-    "    name=name,\n",
-    "    description=\"nucleus segmentation model with thresholding\",\n",
-    "    authors=[{\"name\": \"Jane Doe\"}],\n",
-    "    license=\"CC-BY-4.0\",\n",
-    "    documentation=model_resource.documentation,\n",
-    "    covers=[str(cover) for cover in model_resource.covers],\n",
-    "    tags=[\"nucleus-segmentation\"],\n",
-    "    cite=cite,\n",
-    "    parent=parent,\n",
-    "    architecture=model_source,\n",
-    "    model_kwargs=model_resource.weights[\"pytorch_state_dict\"].kwargs,\n",
-    "    preprocessing=preprocessing,\n",
-    "    postprocessing=postprocessing,\n",
-    "    training_data=training_data,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "0bb1a726",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import napari\n",
-    "# helper function for showing multiple images in napari\n",
-    "def show_images(*images, names=None):\n",
-    "    v = napari.Viewer()\n",
-    "    for i, im in enumerate(images):\n",
-    "        name = None if names is None else names[i]\n",
-    "        if isinstance(im, str):\n",
-    "            im = imageio.imread(im)\n",
-    "        v.add_image(im, name=name)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "b39e6ae6",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# load the new model from the zipped package, run prediction and check the result\n",
-    "new_model = bioimageio.core.load_resource_description(zip_path)\n",
-    "with bioimageio.core.create_prediction_pipeline(new_model) as prediction_pipeline:\n",
-    "    prediction = prediction_pipeline(input_array)[0]\n",
-    "show_images(input_image, prediction, names=[\"input\", \"binarized-prediction\"])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "d883205d",
-   "metadata": {},
-   "source": [
-    "## Create model compatible with deepImageJ\n",
-    "\n",
-    "Now, we take the model from 10.5281/zenodo.6287342 again, convert its weights to torchscript and add the extra configuration needed to run it in deepImageJ."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "c99bca99",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# load the new model from the zipped package, run prediction and check the result\n",
-    "new_model = bioimageio.core.load_resource_description(zip_path)\n",
-    "\n",
-    "# `convert_weigths_to_pytorch_script` creates torchscript weigths based on the weights loaded from pytorch_state_dict\n",
-    "from bioimageio.core.weight_converter.torch import convert_weights_to_torchscript\n",
-    "\n",
-    "# the path to save the newly created torchscript weights\n",
-    "weight_path_ts = os.path.join(model_root, \"weights.torchscript\")\n",
-    "convert_weights_to_torchscript(new_model, weight_path_ts)\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "c1729de7",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# build the new model with the original pytorch_state_dict weights and with the deepimagej config\n",
-    "# we don't apply the post-processing here\n",
-    "\n",
-    "# the path to save the new model with torchscript weights\n",
-    "temp_zip_path = f\"{model_root}/new_model3.zip\"\n",
-    "\n",
-    "_ = build_model(\n",
-    "    weight_uri=weight_file,\n",
-    "    weight_type=\"pytorch_state_dict\",\n",
-    "    architecture=model_source,\n",
-    "    model_kwargs=model_resource.weights[\"pytorch_state_dict\"].kwargs,\n",
-    "    test_inputs=model_resource.test_inputs,\n",
-    "    test_outputs=model_resource.test_outputs,\n",
-    "    input_axes=input_axes,\n",
-    "    output_axes=output_axes,\n",
-    "    output_path=temp_zip_path,\n",
-    "    name=name,\n",
-    "    description=\"nucleus segmentation model with thresholding\",\n",
-    "    authors=[{\"name\": \"Jane Doe\"}],\n",
-    "    license=\"CC-BY-4.0\",\n",
-    "    documentation=model_resource.documentation,\n",
-    "    covers=[str(cover) for cover in model_resource.covers],\n",
-    "    tags=[\"nucleus-segmentation\"],\n",
-    "    cite=cite,\n",
-    "    parent=parent,\n",
-    "    preprocessing=None,\n",
-    "    add_deepimagej_config=True\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d01f39c7",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Add the new torchsript weights in the rdf.yaml, in addition to the original pytorch_state_dict\n",
-    "# Note: this will only work if the weights are added in the order presented here:\n",
-    "    # 1. build_model with the pytorch_state_dict (original)\n",
-    "    # 2. add_weights with the torchscript weights (new)\n",
-    "    # possible reason: cache has the info of original weights only\n",
-    "\n",
-    "final_zip_path = f\"{model_root}/new_model_added_weigths.zip\"\n",
-    "\n",
-    "_=add_weights(\n",
-    "    model=temp_zip_path,\n",
-    "    output_path=final_zip_path,\n",
-    "    weight_uri=weight_path_ts,\n",
-    "    weight_type=\"torchscript\",\n",
-    "    pytorch_version = torch.__version__\n",
-    "    )"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "1097b0bb",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Test both weight types to see that they produce the same test results\n",
-    "\n",
-    "for wf in [\"torchscript\", \"pytorch_state_dict\"]:\n",
-    "    print(\"\\n-- Testing for weight format {} ----------------------------\".format(wf))\n",
-    "    res = bioimageio.core.resource_tests.test_model(final_zip_path,\n",
-    "                                                    weight_format=wf,\n",
-    "                                                    decimal=6) # put many decimals so that the test fails\n",
-    "    for r in res:\n",
-    "        print(\"- {}: \\t {}\".format(r[\"name\"], r[\"status\"]))\n",
-    "        print(r[\"error\"]) # print the disagreeing error in decimals (same for both weight types)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "46185255",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# load the new model from the zipped package, run prediction and check the result\n",
-    "new_model = bioimageio.core.load_resource_description(final_zip_path)\n",
-    "with bioimageio.core.create_prediction_pipeline(new_model) as prediction_pipeline:\n",
-    "    prediction = prediction_pipeline(input_array)[0]\n",
-    "show_images(input_image, prediction, names=[\"input\", \"binarized-prediction\"])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "19466e19",
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.17"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/example/model_usage.ipynb b/example/model_usage.ipynb
index a83d9c29..388faa07 100644
--- a/example/model_usage.ipynb
+++ b/example/model_usage.ipynb
@@ -6,9 +6,7 @@
    "source": [
     "# Bioimage Model Zoo Core Example notebook\n",
     "\n",
-    "This notebook shows how to interact with the `bioimageio.core` programmatically to explore, load, use, and export content from the [BioImage Model Zoo](https://bioimage.io).\n",
-    "\n",
-    "For a local use of this notebook the tested versions of essential required packages can be found in `model_usage_requirements.txt` located in the same folder as the notebook."
+    "This notebook shows how to interact with the `bioimageio.core` programmatically to explore, load, use, and export content from the [BioImage Model Zoo](https://bioimage.io)."
    ]
   },
   {
@@ -22,7 +20,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### 0.1. If running on Google Colab, install necessary dependencies"
+    "### 0.1. Install necessary dependencies"
    ]
   },
   {
@@ -31,10 +29,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
-    "\n",
-    "if os.getenv(\"COLAB_RELEASE_TAG\"):\n",
-    "    %pip install bioimageio.core==0.6.7 torch==2.3.1 onnxruntime==1.18.0"
+    "try:\n",
+    "    import bioimageio.core\n",
+    "    import torch\n",
+    "    import matplotlib\n",
+    "except ImportError:\n",
+    "    %pip install bioimageio.core==0.6.7 torch==2.3.1 matplotlib==3.9.0"
    ]
   },
   {
@@ -198,13 +198,7 @@
     "if \"draft\" in BMZ_MODEL_ID or \"draft\" in BMZ_MODEL_DOI or \"draft\" in BMZ_MODEL_URL:\n",
     "    print(\n",
     "        f\"\\nThis is the DRAFT version of '{model.name}'. \\nDraft versions have not been reviewed by the Bioimage Model Zoo Team and may contain harmful code. Run with caution.\"\n",
-    "    )\n",
-    "\n",
-    "# To be added later:\n",
-    "# elif model.version != model.lastest_version:\n",
-    "#     print('\\nThe loaded version of the model is: ' + model.version, 'but the latest version of the model is: ' + model.lastest_version)\n",
-    "\n",
-    "# TODO: on the model loading success responses add version loaded"
+    "    )"
    ]
   },
   {
@@ -213,7 +207,7 @@
    "source": [
     "### 1.3 Inspect the model metadata\n",
     "\n",
-    "Model metadata includes author names, affiliations, license, and documentation."
+    "Let's inspect all the model metadata. For a step-by-step inspection refer to [bioimageio.spec package example notebook](https://github.com/bioimage-io/spec-bioimage-io/blob/main/example/load_model_and_create_your_own.ipynb)."
    ]
   },
   {
@@ -222,52 +216,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(f\"The model '{model.name}' has the following properties and metadata\\n\")\n",
-    "print(f\" Description:\")\n",
-    "pprint(model.description)\n",
-    "\n",
-    "print(\"\\n The authors of the model are: \")\n",
-    "pprint(model.authors)\n",
-    "\n",
-    "print(\"\\n It is maintained by:\")\n",
-    "pprint(model.maintainers)\n",
-    "\n",
-    "pprint(\"\\n License: \")\n",
-    "pprint(model.license)\n",
-    "\n",
-    "print(\"\\n If you use this model, you are expected to cite it as: \")\n",
-    "pprint(model.cite)\n",
-    "\n",
-    "print(f\"\\n Further documentation can be found here: {model.documentation.absolute()}\")\n",
-    "\n",
-    "if model.git_repo == None:\n",
-    "    print(f\"\\nNo associated GitHub repository.\")\n",
-    "else:\n",
-    "    print(f\"\\n GitHub repository: \")\n",
-    "    pprint(model.git_repo)\n",
-    "\n",
-    "print(f\"\\n Covers of the model '{model.name}' are: \")\n",
-    "for cover in model.covers:\n",
-    "    cover_data = imread(download(cover).path)\n",
-    "    plt.figure(figsize=(10, 10))\n",
-    "    plt.imshow(cover_data)\n",
-    "    plt.xticks([])\n",
-    "    plt.yticks([])\n",
-    "    plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.4 Get expected model inputs and outputs"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Each model expects Inputs and returns Outputs with specific features. The following code shows how to get the full metadata of the expected inputs and outputs of the model."
+ "pprint(model)" ] }, { @@ -276,35 +225,14 @@ "metadata": {}, "outputs": [], "source": [ - "print(\n", - " f\"Model '{model.name}' requires {len(model.inputs)} input(s) with the following features:\"\n", - ")\n", - "for ipt in model.inputs:\n", - " print(f\"\\ninput '{ipt.id}' with axes:\")\n", - " pprint(ipt.axes)\n", - " print(f\"Data description: {ipt.data}\")\n", - " print(f\"Test tensor available at: {ipt.test_tensor.source.absolute()}\")\n", - " if len(ipt.preprocessing) > 1:\n", - " print(\"This input is preprocessed with: \")\n", - " for p in ipt.preprocessing:\n", - " print(p)\n", - "\n", - "print(\n", - " \"\\n-------------------------------------------------------------------------------\"\n", - ")\n", - "# # and what the model outputs are\n", - "print(\n", - " f\"Model '{model.name}' requires {len(model.outputs)} output(s) with the following features:\"\n", - ")\n", - "for out in model.outputs:\n", - " print(f\"\\noutput '{out.id}' with axes:\")\n", - " pprint(out.axes)\n", - " print(f\"Data description: {out.data}\")\n", - " print(f\"Test tensor available at: {out.test_tensor.source.absolute()}\")\n", - " if len(out.postprocessing) > 1:\n", - " print(\"This output is postprocessed with: \")\n", - " for p in out.postprocessing:\n", - " print(p)" + "print(f\"\\n Covers of the model '{model.name}' are: \")\n", + "for cover in model.covers:\n", + " cover_data = imread(download(cover).path)\n", + " plt.figure(figsize=(10, 10))\n", + " plt.imshow(cover_data)\n", + " plt.xticks([])\n", + " plt.yticks([])\n", + " plt.show()" ] }, { @@ -516,7 +444,7 @@ "source": [ "Use the new prediction pipeline to run a prediction for the previously loaded test image.\n", "\n", - "The prediction pipeline returns a `Sample` object, which will be displayed via napari." + "The prediction pipeline returns a `Sample` object, which will be displayed." ] }, { diff --git a/example/model_usage_requirements.txt b/example/model_usage_requirements.txt deleted file mode 100644 index 3e47ab6e..00000000 --- a/example/model_usage_requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -python == 3.11 -bioimageio.core == 0.6.7 -ipykernel == 6.29.4 -matplotlib == 3.9.0 -torch == 2.3.1 -onnxruntime == 1.18.0 \ No newline at end of file diff --git a/setup.py b/setup.py index a5065eaa..7aa66e16 100644 --- a/setup.py +++ b/setup.py @@ -54,6 +54,8 @@ "filelock", "jupyter", "jupyter-black", + "ipykernel", + "matplotlib", "keras>=3.0", "onnxruntime", "packaging>=17.0",