From d50d68a098ab042ab8dcc4cbb9e0e30ce04ad280 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Tue, 27 Feb 2024 14:46:01 +0100 Subject: [PATCH] add docstrings and make stuff private --- bioimageio/core/__init__.py | 7 +- bioimageio/core/__main__.py | 327 +++++++++--------- bioimageio/core/{op_base.py => _op_base.py} | 0 ...on_pipeline.py => _prediction_pipeline.py} | 0 .../{resource_tests.py => _resource_tests.py} | 116 +++---- bioimageio/core/common.py | 13 +- bioimageio/core/io.py | 62 ---- bioimageio/core/prediction.py | 2 + bioimageio/core/proc_ops.py | 2 +- bioimageio/core/proc_setup.py | 3 + bioimageio/core/stat_calculators.py | 15 +- bioimageio/core/stat_measures.py | 20 ++ bioimageio/core/utils/__init__.py | 4 +- .../utils/{_tensor_io.py => _digest_spec.py} | 0 bioimageio/core/{ => utils}/image_helper.py | 0 bioimageio/core/weight_converter/__init__.py | 1 + .../core/weight_converter/keras/__init__.py | 2 +- .../keras/{tensorflow.py => _tensorflow.py} | 20 +- .../core/weight_converter/torch/__init__.py | 3 +- .../torch/{onnx.py => _onnx.py} | 2 +- .../torch/{torchscript.py => _torchscript.py} | 2 +- .../torch/{utils.py => _utils.py} | 0 tests/conftest.py | 37 +- tests/test_cli.py | 179 +++++----- tests/test_prediction_pipeline.py | 2 +- ...t_prediction_pipeline_device_management.py | 2 +- tests/test_resource_tests.py | 12 +- tests/{ => utils}/test_image_helper.py | 4 +- .../weight_converter/keras/test_tensorflow.py | 4 + tests/weight_converter/test_add_weights.py | 67 ++-- tests/weight_converter/torch/test_onnx.py | 6 +- .../torch/test_torchscript.py | 4 + 32 files changed, 435 insertions(+), 483 deletions(-) rename bioimageio/core/{op_base.py => _op_base.py} (100%) rename bioimageio/core/{prediction_pipeline.py => _prediction_pipeline.py} (100%) rename bioimageio/core/{resource_tests.py => _resource_tests.py} (99%) delete mode 100644 bioimageio/core/io.py rename bioimageio/core/utils/{_tensor_io.py => _digest_spec.py} (100%) rename bioimageio/core/{ => utils}/image_helper.py (100%) rename bioimageio/core/weight_converter/keras/{tensorflow.py => _tensorflow.py} (85%) rename bioimageio/core/weight_converter/torch/{onnx.py => _onnx.py} (97%) rename bioimageio/core/weight_converter/torch/{torchscript.py => _torchscript.py} (99%) rename bioimageio/core/weight_converter/torch/{utils.py => _utils.py} (100%) rename tests/{ => utils}/test_image_helper.py (84%) diff --git a/bioimageio/core/__init__.py b/bioimageio/core/__init__.py index 29116eaa..926c61d7 100644 --- a/bioimageio/core/__init__.py +++ b/bioimageio/core/__init__.py @@ -15,8 +15,9 @@ from bioimageio.spec import save_bioimageio_yaml_only as save_bioimageio_yaml_only from bioimageio.spec import validate_format as validate_format -from .prediction_pipeline import create_prediction_pipeline as create_prediction_pipeline -from .resource_tests import load_description_and_test as load_description_and_test -from .resource_tests import test_description as test_description +from ._prediction_pipeline import create_prediction_pipeline as create_prediction_pipeline +from ._resource_tests import load_description_and_test as load_description_and_test +from ._resource_tests import test_description as test_description +from ._resource_tests import test_model as test_model test_resource = test_description diff --git a/bioimageio/core/__main__.py b/bioimageio/core/__main__.py index 54ab9425..9e767ef1 100644 --- a/bioimageio/core/__main__.py +++ b/bioimageio/core/__main__.py @@ -1,36 +1,20 @@ import enum -import json -import os import sys -import 
warnings -from glob import glob from pathlib import Path from typing import List, Optional import typer from typing_extensions import Annotated -from bioimageio.core import __version__, prediction, resource_tests -from bioimageio.spec import load_description, save_bioimageio_package +from bioimageio.core import __version__ +from bioimageio.core import test_description as _test_description +from bioimageio.core import test_model as _test_model +from bioimageio.spec import save_bioimageio_package from bioimageio.spec.collection import CollectionDescr from bioimageio.spec.dataset import DatasetDescr from bioimageio.spec.model import ModelDescr from bioimageio.spec.notebook import NotebookDescr -try: - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - from bioimageio.core.weight_converter import torch as torch_converter -except ImportError: - torch_converter = None - -try: - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - from bioimageio.core.weight_converter import keras as keras_converter -except ImportError: - keras_converter = None - help_version = f"""bioimageio.core {__version__} bioimageio.spec {__version__} implementing: @@ -88,7 +72,11 @@ def package( # typer bug: typer returns empty tuple instead of None if weights_order_priority is not given weights_priority_order = weights_priority_order or None # TODO: check if this is still the case - _ = save_bioimageio_package(source, output_path=path, weights_priority_order=weights_priority_order) + _ = save_bioimageio_package( + source, + output_path=path, + weights_priority_order=None if weights_priority_order is None else [wpo.name for wpo in weights_priority_order], + ) @app.command() @@ -103,7 +91,7 @@ def test_model( # this is a weird typer bug: default devices are empty tuple although they should be None devices = devices or None - summary = resource_tests.test_model( + summary = _test_model( model_rdf, weight_format=None if weight_format is None else weight_format.value, devices=devices, @@ -114,7 +102,7 @@ def test_model( sys.exit(0 if summary.status == "passed" else 1) -test_model.__doc__ = resource_tests.test_model.__doc__ +test_model.__doc__ = _test_model.__doc__ @app.command() @@ -131,159 +119,160 @@ def test_resource( decimal: Annotated[int, typer.Option(help="(for model only) The test precision.")] = 4, ): # this is a weird typer bug: default devices are empty tuple although they should be None - if len(devices) == 0: + if devices is None or len(devices) == 0: devices = None - print(f"\ntesting {rdf}...") - summary = resource_tests.test_description( + + summary = _test_description( rdf, weight_format=None if weight_format is None else weight_format.value, devices=devices, decimal=decimal ) print(summary.format()) sys.exit(0 if summary.status == "passed" else 1) -test_resource.__doc__ = resource_tests.test_description.__doc__ - - -@app.command() -def predict_image( - model_rdf: Annotated[ - Path, typer.Argument(help="Path to the model resource description file (rdf.yaml) or zipped model.") - ], - inputs: Annotated[List[Path], typer.Option(help="Path(s) to the model input(s).")], - outputs: Annotated[List[Path], typer.Option(help="Path(s) for saveing the model output(s).")], - # NOTE: typer currently doesn't support union types, so we only support boolean here - # padding: Optional[Union[str, bool]] = typer.Argument( - # None, help="Padding to apply in each dimension passed as json encoded string." 
- # ), - # tiling: Optional[Union[str, bool]] = typer.Argument( - # None, help="Padding to apply in each dimension passed as json encoded string." - # ), - padding: Annotated[ - Optional[bool], typer.Option(help="Whether to pad the image to a size suited for the model.") - ] = None, - tiling: Annotated[Optional[bool], typer.Option(help="Whether to run prediction in tiling mode.")] = None, - weight_format: Annotated[Optional[WeightsFormatEnum], typer.Option(help="The weight format to use.")] = None, - devices: Annotated[Optional[List[str]], typer.Option(help="Devices for running the model.")] = None, -): - if isinstance(padding, str): - padding = json.loads(padding.replace("'", '"')) - assert isinstance(padding, dict) - if isinstance(tiling, str): - tiling = json.loads(tiling.replace("'", '"')) - assert isinstance(tiling, dict) - - # this is a weird typer bug: default devices are empty tuple although they should be None - if devices is None or len(devices) == 0: - devices = None - - prediction.predict_image( - model_rdf, inputs, outputs, padding, tiling, None if weight_format is None else weight_format.value, devices - ) - - -predict_image.__doc__ = prediction.predict_image.__doc__ - - -@app.command() -def predict_images( - model_rdf: Annotated[ - Path, typer.Argument(help="Path to the model resource description file (rdf.yaml) or zipped model.") - ], - input_pattern: Annotated[str, typer.Argument(help="Glob pattern for the input images.")], - output_folder: Annotated[str, typer.Argument(help="Folder to save the outputs.")], - output_extension: Annotated[Optional[str], typer.Argument(help="Optional output extension.")] = None, - # NOTE: typer currently doesn't support union types, so we only support boolean here - # padding: Optional[Union[str, bool]] = typer.Argument( - # None, help="Padding to apply in each dimension passed as json encoded string." - # ), - # tiling: Optional[Union[str, bool]] = typer.Argument( - # None, help="Padding to apply in each dimension passed as json encoded string." 
- # ), - padding: Annotated[ - Optional[bool], typer.Option(help="Whether to pad the image to a size suited for the model.") - ] = None, - tiling: Annotated[Optional[bool], typer.Option(help="Whether to run prediction in tiling mode.")] = None, - weight_format: Annotated[Optional[WeightsFormatEnum], typer.Option(help="The weight format to use.")] = None, - devices: Annotated[Optional[List[str]], typer.Option(help="Devices for running the model.")] = None, -): - input_files = glob(input_pattern) - input_names = [os.path.split(infile)[1] for infile in input_files] - output_files = [os.path.join(output_folder, fname) for fname in input_names] - if output_extension is not None: - output_files = [f"{os.path.splitext(outfile)[0]}{output_extension}" for outfile in output_files] - - if isinstance(padding, str): - padding = json.loads(padding.replace("'", '"')) - assert isinstance(padding, dict) - if isinstance(tiling, str): - tiling = json.loads(tiling.replace("'", '"')) - assert isinstance(tiling, dict) - - # this is a weird typer bug: default devices are empty tuple although they should be None - if len(devices) == 0: - devices = None - prediction.predict_images( - model_rdf, - input_files, - output_files, - padding=padding, - tiling=tiling, - weight_format=None if weight_format is None else weight_format.value, - devices=devices, - verbose=True, - ) - - -predict_images.__doc__ = prediction.predict_images.__doc__ - - -if torch_converter is not None: - - @app.command() - def convert_torch_weights_to_onnx( - model_rdf: Path = typer.Argument( - ..., help="Path to the model resource description file (rdf.yaml) or zipped model." - ), - output_path: Path = typer.Argument(..., help="Where to save the onnx weights."), - opset_version: Optional[int] = typer.Argument(12, help="Onnx opset version."), - use_tracing: bool = typer.Option(True, help="Whether to use torch.jit tracing or scripting."), - verbose: bool = typer.Option(True, help="Verbosity"), - ): - ret_code = torch_converter.convert_weights_to_onnx(model_rdf, output_path, opset_version, use_tracing, verbose) - sys.exit(ret_code) - - convert_torch_weights_to_onnx.__doc__ = torch_converter.convert_weights_to_onnx.__doc__ - - @app.command() - def convert_torch_weights_to_torchscript( - model_rdf: Path = typer.Argument( - ..., help="Path to the model resource description file (rdf.yaml) or zipped model." 
- ), - output_path: Path = typer.Argument(..., help="Where to save the torchscript weights."), - use_tracing: bool = typer.Option(True, help="Whether to use torch.jit tracing or scripting."), - ): - torch_converter.convert_weights_to_torchscript(model_rdf, output_path, use_tracing) - sys.exit(0) - - convert_torch_weights_to_torchscript.__doc__ = torch_converter.convert_weights_to_torchscript.__doc__ - - -if keras_converter is not None: - - @app.command() - def convert_keras_weights_to_tensorflow( - model_rdf: Annotated[ - Path, typer.Argument(help="Path to the model resource description file (rdf.yaml) or zipped model.") - ], - output_path: Annotated[Path, typer.Argument(help="Where to save the tensorflow weights.")], - ): - rd = load_description(model_rdf) - ret_code = keras_converter.convert_weights_to_tensorflow_saved_model_bundle(rd, output_path) - sys.exit(ret_code) - - convert_keras_weights_to_tensorflow.__doc__ = ( - keras_converter.convert_weights_to_tensorflow_saved_model_bundle.__doc__ - ) +test_resource.__doc__ = _test_description.__doc__ + + +# TODO: add predict commands +# @app.command() +# def predict_image( +# model_rdf: Annotated[ +# Path, typer.Argument(help="Path to the model resource description file (rdf.yaml) or zipped model.") +# ], +# inputs: Annotated[List[Path], typer.Option(help="Path(s) to the model input(s).")], +# outputs: Annotated[List[Path], typer.Option(help="Path(s) for saveing the model output(s).")], +# # NOTE: typer currently doesn't support union types, so we only support boolean here +# # padding: Optional[Union[str, bool]] = typer.Argument( +# # None, help="Padding to apply in each dimension passed as json encoded string." +# # ), +# # tiling: Optional[Union[str, bool]] = typer.Argument( +# # None, help="Padding to apply in each dimension passed as json encoded string." 
+# # ), +# padding: Annotated[ +# Optional[bool], typer.Option(help="Whether to pad the image to a size suited for the model.") +# ] = None, +# tiling: Annotated[Optional[bool], typer.Option(help="Whether to run prediction in tiling mode.")] = None, +# weight_format: Annotated[Optional[WeightsFormatEnum], typer.Option(help="The weight format to use.")] = None, +# devices: Annotated[Optional[List[str]], typer.Option(help="Devices for running the model.")] = None, +# ): +# if isinstance(padding, str): +# padding = json.loads(padding.replace("'", '"')) +# assert isinstance(padding, dict) +# if isinstance(tiling, str): +# tiling = json.loads(tiling.replace("'", '"')) +# assert isinstance(tiling, dict) + +# # this is a weird typer bug: default devices are empty tuple although they should be None +# if devices is None or len(devices) == 0: +# devices = None + +# prediction.predict_image( +# model_rdf, inputs, outputs, padding, tiling, None if weight_format is None else weight_format.value, devices +# ) + + +# predict_image.__doc__ = prediction.predict_image.__doc__ + + +# @app.command() +# def predict_images( +# model_rdf: Annotated[ +# Path, typer.Argument(help="Path to the model resource description file (rdf.yaml) or zipped model.") +# ], +# input_pattern: Annotated[str, typer.Argument(help="Glob pattern for the input images.")], +# output_folder: Annotated[str, typer.Argument(help="Folder to save the outputs.")], +# output_extension: Annotated[Optional[str], typer.Argument(help="Optional output extension.")] = None, +# # NOTE: typer currently doesn't support union types, so we only support boolean here +# # padding: Optional[Union[str, bool]] = typer.Argument( +# # None, help="Padding to apply in each dimension passed as json encoded string." +# # ), +# # tiling: Optional[Union[str, bool]] = typer.Argument( +# # None, help="Padding to apply in each dimension passed as json encoded string." 
+# # ), +# padding: Annotated[ +# Optional[bool], typer.Option(help="Whether to pad the image to a size suited for the model.") +# ] = None, +# tiling: Annotated[Optional[bool], typer.Option(help="Whether to run prediction in tiling mode.")] = None, +# weight_format: Annotated[Optional[WeightsFormatEnum], typer.Option(help="The weight format to use.")] = None, +# devices: Annotated[Optional[List[str]], typer.Option(help="Devices for running the model.")] = None, +# ): +# input_files = glob(input_pattern) +# input_names = [os.path.split(infile)[1] for infile in input_files] +# output_files = [os.path.join(output_folder, fname) for fname in input_names] +# if output_extension is not None: +# output_files = [f"{os.path.splitext(outfile)[0]}{output_extension}" for outfile in output_files] + +# if isinstance(padding, str): +# padding = json.loads(padding.replace("'", '"')) +# assert isinstance(padding, dict) +# if isinstance(tiling, str): +# tiling = json.loads(tiling.replace("'", '"')) +# assert isinstance(tiling, dict) + +# # this is a weird typer bug: default devices are empty tuple although they should be None +# if len(devices) == 0: +# devices = None +# prediction.predict_images( +# model_rdf, +# input_files, +# output_files, +# padding=padding, +# tiling=tiling, +# weight_format=None if weight_format is None else weight_format.value, +# devices=devices, +# verbose=True, +# ) + + +# predict_images.__doc__ = prediction.predict_images.__doc__ + + +# if torch_converter is not None: + +# @app.command() +# def convert_torch_weights_to_onnx( +# model_rdf: Path = typer.Argument( +# ..., help="Path to the model resource description file (rdf.yaml) or zipped model." +# ), +# output_path: Path = typer.Argument(..., help="Where to save the onnx weights."), +# opset_version: Optional[int] = typer.Argument(12, help="Onnx opset version."), +# use_tracing: bool = typer.Option(True, help="Whether to use torch.jit tracing or scripting."), +# verbose: bool = typer.Option(True, help="Verbosity"), +# ): +# ret_code = torch_converter.convert_weights_to_onnx(model_rdf, output_path, opset_version, use_tracing, verbose) +# sys.exit(ret_code) + +# convert_torch_weights_to_onnx.__doc__ = torch_converter.convert_weights_to_onnx.__doc__ + +# @app.command() +# def convert_torch_weights_to_torchscript( +# model_rdf: Path = typer.Argument( +# ..., help="Path to the model resource description file (rdf.yaml) or zipped model." 
+# ), +# output_path: Path = typer.Argument(..., help="Where to save the torchscript weights."), +# use_tracing: bool = typer.Option(True, help="Whether to use torch.jit tracing or scripting."), +# ): +# torch_converter.convert_weights_to_torchscript(model_rdf, output_path, use_tracing) +# sys.exit(0) + +# convert_torch_weights_to_torchscript.__doc__ = torch_converter.convert_weights_to_torchscript.__doc__ + + +# if keras_converter is not None: + +# @app.command() +# def convert_keras_weights_to_tensorflow( +# model_rdf: Annotated[ +# Path, typer.Argument(help="Path to the model resource description file (rdf.yaml) or zipped model.") +# ], +# output_path: Annotated[Path, typer.Argument(help="Where to save the tensorflow weights.")], +# ): +# rd = load_description(model_rdf) +# ret_code = keras_converter.convert_weights_to_tensorflow_saved_model_bundle(rd, output_path) +# sys.exit(ret_code) + +# convert_keras_weights_to_tensorflow.__doc__ = ( +# keras_converter.convert_weights_to_tensorflow_saved_model_bundle.__doc__ +# ) if __name__ == "__main__": diff --git a/bioimageio/core/op_base.py b/bioimageio/core/_op_base.py similarity index 100% rename from bioimageio/core/op_base.py rename to bioimageio/core/_op_base.py diff --git a/bioimageio/core/prediction_pipeline.py b/bioimageio/core/_prediction_pipeline.py similarity index 100% rename from bioimageio/core/prediction_pipeline.py rename to bioimageio/core/_prediction_pipeline.py diff --git a/bioimageio/core/resource_tests.py b/bioimageio/core/_resource_tests.py similarity index 99% rename from bioimageio/core/resource_tests.py rename to bioimageio/core/_resource_tests.py index 2ac15c71..0f30b550 100644 --- a/bioimageio/core/resource_tests.py +++ b/bioimageio/core/_resource_tests.py @@ -6,7 +6,7 @@ import xarray as xr from bioimageio.core import __version__ as bioimageio_core_version -from bioimageio.core.prediction_pipeline import create_prediction_pipeline +from bioimageio.core._prediction_pipeline import create_prediction_pipeline from bioimageio.spec import InvalidDescr, ResourceDescr, build_description, dump_description, load_description from bioimageio.spec._internal.base_nodes import ResourceDescrBase from bioimageio.spec._internal.io_utils import load_array @@ -28,6 +28,63 @@ def test_model( ) +def test_description( + source: Union[ResourceDescr, FileSource, BioimageioYamlContent], + *, + format_version: Union[Literal["discover", "latest"], str] = "discover", + weight_format: Optional[WeightsFormat] = None, + devices: Optional[List[str]] = None, + decimal: int = 4, + expected_type: Optional[str] = None, +) -> ValidationSummary: + """Test RDF dynamically, e.g. model inference of test inputs""" + rd = load_description_and_test( + source, + format_version=format_version, + weight_format=weight_format, + devices=devices, + decimal=decimal, + expected_type=expected_type, + ) + return rd.validation_summary + + +def load_description_and_test( + source: Union[ResourceDescr, FileSource, BioimageioYamlContent], + *, + format_version: Union[Literal["discover", "latest"], str] = "discover", + weight_format: Optional[WeightsFormat] = None, + devices: Optional[List[str]] = None, + decimal: int = 4, + expected_type: Optional[str] = None, +) -> Union[ResourceDescr, InvalidDescr]: + """Test RDF dynamically, e.g. 
model inference of test inputs""" + if ( + isinstance(source, ResourceDescrBase) + and format_version != "discover" + and source.format_version != format_version + ): + warnings.warn(f"deserializing source to ensure we validate and test using format {format_version}") + source = dump_description(source) + + if isinstance(source, ResourceDescrBase): + rd = source + elif isinstance(source, dict): + rd = build_description(source, format_version=format_version) + else: + rd = load_description(source, format_version=format_version) + + rd.validation_summary.env.append(InstalledPackage(name="bioimageio.core", version=bioimageio_core_version)) + + if expected_type is not None: + _test_expected_resource_type(rd, expected_type) + + if isinstance(rd, (v0_4.ModelDescr, v0_5.ModelDescr)): + _test_model_inference(rd, weight_format, devices, decimal) + + return rd + + def _test_model_inference( model: Union[v0_4.ModelDescr, v0_5.ModelDescr], weight_format: Optional[WeightsFormat], @@ -104,63 +161,6 @@ def _test_expected_resource_type(rd: Union[InvalidDescr, ResourceDescr], expecte ) -def test_description( - source: Union[ResourceDescr, FileSource, BioimageioYamlContent], - *, - format_version: Union[Literal["discover", "latest"], str] = "discover", - weight_format: Optional[WeightsFormat] = None, - devices: Optional[List[str]] = None, - decimal: int = 4, - expected_type: Optional[str] = None, -) -> ValidationSummary: - """Test RDF dynamically, e.g. model inference of test inputs""" - rd = load_description_and_test( - source, - format_version=format_version, - weight_format=weight_format, - devices=devices, - decimal=decimal, - expected_type=expected_type, - ) - return rd.validation_summary - - -def load_description_and_test( - source: Union[ResourceDescr, FileSource, BioimageioYamlContent], - *, - format_version: Union[Literal["discover", "latest"], str] = "discover", - weight_format: Optional[WeightsFormat] = None, - devices: Optional[List[str]] = None, - decimal: int = 4, - expected_type: Optional[str] = None, -) -> Union[ResourceDescr, InvalidDescr]: - """Test RDF dynamically, e.g. 
model inference of test inputs""" - if ( - isinstance(source, ResourceDescrBase) - and format_version != "discover" - and source.format_version != format_version - ): - warnings.warn(f"deserializing source to ensure we validate and test using format {format_version}") - source = dump_description(source) - - if isinstance(source, ResourceDescrBase): - rd = source - elif isinstance(source, dict): - rd = build_description(source, format_version=format_version) - else: - rd = load_description(source, format_version=format_version) - - rd.validation_summary.env.append(InstalledPackage(name="bioimageio.core", version=bioimageio_core_version)) - - if expected_type is not None: - _test_expected_resource_type(rd, expected_type) - - if isinstance(rd, (v0_4.ModelDescr, v0_5.ModelDescr)): - _test_model_inference(rd, weight_format, devices, decimal) - - return rd - - # def debug_model( # model_rdf: Union[RawResourceDescr, ResourceDescr, URI, Path, str], # *, diff --git a/bioimageio/core/common.py b/bioimageio/core/common.py index 1f0bcd84..1981f2c5 100644 --- a/bioimageio/core/common.py +++ b/bioimageio/core/common.py @@ -1,9 +1,9 @@ from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Dict, Union +from typing import TYPE_CHECKING, Dict import xarray as xr -from bioimageio.spec.model import v0_4, v0_5 +from bioimageio.spec.model import v0_5 if TYPE_CHECKING: from bioimageio.core.stat_measures import Measure, MeasureValue @@ -19,9 +19,10 @@ @dataclass class Sample: - data: Data = field(default_factory=dict) - stat: Stat = field(default_factory=dict) + """A (dataset) sample""" + data: Data = field(default_factory=dict) + """the samples tensors""" -ProcessingDescrBase = Union[v0_4.ProcessingDescrBase, v0_5.ProcessingDescrBase] -ProcessingKwargs = Union[v0_4.ProcessingKwargs, v0_5.ProcessingKwargs] + stat: Stat = field(default_factory=dict) + """sample and dataset statistics""" diff --git a/bioimageio/core/io.py b/bioimageio/core/io.py deleted file mode 100644 index 1f27b60e..00000000 --- a/bioimageio/core/io.py +++ /dev/null @@ -1,62 +0,0 @@ -from __future__ import annotations - -from contextlib import nullcontext -from typing import Literal, Optional, Union - -from bioimageio.core.resource_tests import test_description -from bioimageio.spec import load_description as load_description -from bioimageio.spec._description import ResourceDescr -from bioimageio.spec._internal.constants import DISCOVER -from bioimageio.spec._internal.io_utils import open_bioimageio_yaml -from bioimageio.spec._internal.validation_context import ValidationContext -from bioimageio.spec.common import BioimageioYamlContent, FileSource, InvalidDescr -from bioimageio.spec.summary import ValidationSummary - - -def load_description_and_test( - source: FileSource, - /, - *, - format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER, -) -> Union[ResourceDescr, InvalidDescr]: - opened = open_bioimageio_yaml(source) - - return build_description_and_test( - opened.content, - context=ValidationContext(root=opened.original_root, file_name=opened.original_file_name), - format_version=format_version, - ) - - -def build_description_and_test( - data: BioimageioYamlContent, - /, - *, - context: Optional[ValidationContext] = None, - format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER, -) -> Union[ResourceDescr, InvalidDescr]: - """load and validate a BioImage.IO description from the content of a resource description file (RDF)""" - if context is None: - val_context = nullcontext() 
- else: - val_context = context - - with val_context: - rd = test_description(data, format_version=format_version) - - return rd - - -def validate( - source: "FileSource | BioimageioYamlContent", - /, - *, - context: Optional[ValidationContext] = None, - format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER, -) -> ValidationSummary: - if isinstance(source, dict): - rd = build_description_and_test(source, context=context, format_version=format_version) - else: - rd = load_description_and_test(source, format_version=format_version) - - return rd.validation_summary diff --git a/bioimageio/core/prediction.py b/bioimageio/core/prediction.py index 228bfa63..e9ec7256 100644 --- a/bioimageio/core/prediction.py +++ b/bioimageio/core/prediction.py @@ -1,3 +1,5 @@ +"""coming soon""" + # TODO: update # import collections # import os diff --git a/bioimageio/core/proc_ops.py b/bioimageio/core/proc_ops.py index d055c059..a0a2a1f1 100644 --- a/bioimageio/core/proc_ops.py +++ b/bioimageio/core/proc_ops.py @@ -19,6 +19,7 @@ from numpy.typing import DTypeLike from typing_extensions import Self, assert_never +from bioimageio.core._op_base import Operator from bioimageio.core.common import ( AxisId, Sample, @@ -26,7 +27,6 @@ Tensor, TensorId, ) -from bioimageio.core.op_base import Operator from bioimageio.core.stat_calculators import StatsCalculator from bioimageio.core.stat_measures import ( DatasetMean, diff --git a/bioimageio/core/proc_setup.py b/bioimageio/core/proc_setup.py index 4c504681..a71ba023 100644 --- a/bioimageio/core/proc_setup.py +++ b/bioimageio/core/proc_setup.py @@ -40,6 +40,9 @@ def setup_pre_and_postprocessing( keep_updating_initial_dataset_stats: bool = False, fixed_dataset_stats: Mapping[DatasetMeasure, MeasureValue] = MappingProxyType({}), ) -> PreAndPostprocessing: + """ + Get pre- and postprocessing operators for a `model` description. 
+    Used in `bioimageio.core.create_prediction_pipeline`."""
     prep, post, prep_meas, post_meas = _prepare_setup_pre_and_postprocessing(model)

     missing_dataset_stats = {m for m in prep_meas | post_meas if m not in fixed_dataset_stats}
diff --git a/bioimageio/core/stat_calculators.py b/bioimageio/core/stat_calculators.py
index 3b0045da..3cc4d67e 100644
--- a/bioimageio/core/stat_calculators.py
+++ b/bioimageio/core/stat_calculators.py
@@ -65,6 +65,8 @@ def quantile(self, q: Any) -> Any:


 class MeanCalculator:
+    """to calculate sample and dataset mean"""
+
     def __init__(self, tensor_id: TensorId, axes: Optional[Sequence[AxisId]]):
         super().__init__()
         self._n: int = 0
@@ -115,6 +117,8 @@ def finalize(self) -> Dict[DatasetMean, MeasureValue]:


 class MeanVarStdCalculator:
+    """to calculate sample and dataset mean, variance or standard deviation"""
+
     def __init__(self, tensor_id: TensorId, axes: Optional[Sequence[AxisId]]):
         super().__init__()
         self._axes = None if axes is None else tuple(axes)
@@ -181,6 +185,8 @@ def finalize(self) -> Dict[Union[DatasetMean, DatasetVar, DatasetStd], MeasureVa


 class SamplePercentilesCalculator:
+    """to calculate sample percentiles"""
+
     def __init__(self, tensor_id: TensorId, axes: Optional[Sequence[AxisId]], ns: Collection[float]):
         super().__init__()
         assert all(0 <= n <= 100 for n in ns)
@@ -196,6 +202,9 @@ def compute(self, sample: Sample) -> Dict[SamplePercentile, MeasureValue]:


 class MeanPercentilesCalculator:
+    """to calculate dataset percentiles heuristically by averaging across samples
+    **note**: the returned dataset percentiles are an estimate and **not mathematically correct**"""
+
     def __init__(self, tensor_id: TensorId, axes: Optional[Sequence[AxisId]], ns: Collection[float]):
         super().__init__()
         assert all(0 <= n <= 100 for n in ns)
@@ -234,6 +243,8 @@ def finalize(self) -> Dict[DatasetPercentile, MeasureValue]:


 class CrickPercentilesCalculator:
+    """to calculate dataset percentiles with the experimental [crick library](https://github.com/dask/crick)"""
+
     def __init__(self, tensor_id: TensorId, axes: Optional[Sequence[AxisId]], ns: Collection[float]):
         warnings.warn("Computing dataset percentiles with experimental 'crick' library.")
         super().__init__()
@@ -297,7 +308,7 @@ def finalize(self) -> Dict[DatasetPercentile, MeasureValue]:
     DatasetPercentilesCalculator = CrickPercentilesCalculator


-class NaivSampleMeasureCalculator:
+class NaiveSampleMeasureCalculator:
     """wrapper for measures to match interface of other sample measure calculators"""

     def __init__(self, tensor_id: TensorId, measure: SampleMeasure):
@@ -310,7 +321,7 @@ def compute(self, sample: Sample) -> Dict[SampleMeasure, MeasureValue]:


 SampleMeasureCalculator = Union[
-    MeanCalculator, MeanVarStdCalculator, SamplePercentilesCalculator, NaivSampleMeasureCalculator
+    MeanCalculator, MeanVarStdCalculator, SamplePercentilesCalculator, NaiveSampleMeasureCalculator
 ]
 DatasetMeasureCalculator = Union[MeanCalculator, MeanVarStdCalculator, DatasetPercentilesCalculator]
diff --git a/bioimageio/core/stat_measures.py b/bioimageio/core/stat_measures.py
index 726c90cf..99329498 100644
--- a/bioimageio/core/stat_measures.py
+++ b/bioimageio/core/stat_measures.py
@@ -32,10 +32,13 @@ class DatasetMeasureBase(MeasureBase, ABC):
 @dataclass(frozen=True)
 class _Mean:
     axes: Optional[Tuple[AxisId, ...]] = None
+    """`axes` to reduce"""


 @dataclass(frozen=True)
 class SampleMean(_Mean, SampleMeasureBase):
+    """The mean value of a single tensor"""
+
     def compute(self, sample: Sample) -> MeasureValue:
         return
sample.data[self.tensor_id].mean(dim=self.axes) @@ -45,6 +48,8 @@ def __post_init__(self): @dataclass(frozen=True) class DatasetMean(_Mean, DatasetMeasureBase): + """The mean value across multiple samples""" + def __post_init__(self): assert self.axes is None or AxisId("batch") in self.axes @@ -52,10 +57,13 @@ def __post_init__(self): @dataclass(frozen=True) class _Std: axes: Optional[Tuple[AxisId, ...]] = None + """`axes` to reduce""" @dataclass(frozen=True) class SampleStd(_Std, SampleMeasureBase): + """The standard deviation of a single tensor""" + def compute(self, sample: Sample) -> MeasureValue: return sample.data[self.tensor_id].std(dim=self.axes) @@ -65,6 +73,8 @@ def __post_init__(self): @dataclass(frozen=True) class DatasetStd(_Std, DatasetMeasureBase): + """The standard deviation across multiple samples""" + def __post_init__(self): assert self.axes is None or AxisId("batch") in self.axes @@ -72,10 +82,13 @@ def __post_init__(self): @dataclass(frozen=True) class _Var: axes: Optional[Tuple[AxisId, ...]] = None + """`axes` to reduce""" @dataclass(frozen=True) class SampleVar(_Var, SampleMeasureBase): + """The variance of a single tensor""" + def compute(self, sample: Sample) -> MeasureValue: return sample.data[self.tensor_id].var(dim=self.axes) @@ -85,6 +98,8 @@ def __post_init__(self): @dataclass(frozen=True) class DatasetVar(_Var, DatasetMeasureBase): + """The variance across multiple samples""" + def __post_init__(self): assert self.axes is None or AxisId("batch") in self.axes @@ -93,6 +108,7 @@ def __post_init__(self): class _Percentile: n: float axes: Optional[Tuple[AxisId, ...]] = None + """`axes` to reduce""" def __post_init__(self): assert self.n >= 0 @@ -101,6 +117,8 @@ def __post_init__(self): @dataclass(frozen=True) class SamplePercentile(_Percentile, SampleMeasureBase): + """The `n`th percentile of a single tensor""" + def compute(self, sample: Sample) -> MeasureValue: return sample.data[self.tensor_id].quantile(self.n / 100.0, dim=self.axes) @@ -111,6 +129,8 @@ def __post_init__(self): @dataclass(frozen=True) class DatasetPercentile(_Percentile, DatasetMeasureBase): + """The `n`th percentile across multiple samples""" + def __post_init__(self): super().__post_init__() assert self.axes is None or AxisId("batch") in self.axes diff --git a/bioimageio/core/utils/__init__.py b/bioimageio/core/utils/__init__.py index 426c8591..7126bd75 100644 --- a/bioimageio/core/utils/__init__.py +++ b/bioimageio/core/utils/__init__.py @@ -1,9 +1,9 @@ import sys from pathlib import Path +from ._digest_spec import get_test_inputs as get_test_inputs +from ._digest_spec import get_test_outputs as get_test_outputs from ._import_callable import import_callable as import_callable -from ._tensor_io import get_test_inputs as get_test_inputs -from ._tensor_io import get_test_outputs as get_test_outputs if sys.version_info < (3, 9): diff --git a/bioimageio/core/utils/_tensor_io.py b/bioimageio/core/utils/_digest_spec.py similarity index 100% rename from bioimageio/core/utils/_tensor_io.py rename to bioimageio/core/utils/_digest_spec.py diff --git a/bioimageio/core/image_helper.py b/bioimageio/core/utils/image_helper.py similarity index 100% rename from bioimageio/core/image_helper.py rename to bioimageio/core/utils/image_helper.py diff --git a/bioimageio/core/weight_converter/__init__.py b/bioimageio/core/weight_converter/__init__.py index e69de29b..5f1674c9 100644 --- a/bioimageio/core/weight_converter/__init__.py +++ b/bioimageio/core/weight_converter/__init__.py @@ -0,0 +1 @@ +"""coming soon""" 
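
# Illustrative sketch (not part of the patch): evaluating one of the measures
# documented in stat_measures.py above on a Sample from common.py. The tensor
# and axis names used here are made up for the example.
import numpy as np
import xarray as xr

from bioimageio.core.common import AxisId, Sample, TensorId
from bioimageio.core.stat_measures import SampleMean

sample = Sample(
    data={TensorId("raw"): xr.DataArray(np.random.rand(1, 1, 8, 8), dims=("b", "c", "y", "x"))}
)
mean = SampleMean(tensor_id=TensorId("raw"), axes=(AxisId("y"), AxisId("x")))
sample.stat[mean] = mean.compute(sample)  # per-sample statistic, reduced over y and x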
diff --git a/bioimageio/core/weight_converter/keras/__init__.py b/bioimageio/core/weight_converter/keras/__init__.py index 471713e2..195b42b8 100644 --- a/bioimageio/core/weight_converter/keras/__init__.py +++ b/bioimageio/core/weight_converter/keras/__init__.py @@ -1 +1 @@ -from .tensorflow import convert_weights_to_tensorflow_saved_model_bundle +# TODO: update keras weight converters diff --git a/bioimageio/core/weight_converter/keras/tensorflow.py b/bioimageio/core/weight_converter/keras/_tensorflow.py similarity index 85% rename from bioimageio/core/weight_converter/keras/tensorflow.py rename to bioimageio/core/weight_converter/keras/_tensorflow.py index e6476a46..5fa6be54 100644 --- a/bioimageio/core/weight_converter/keras/tensorflow.py +++ b/bioimageio/core/weight_converter/keras/_tensorflow.py @@ -7,7 +7,7 @@ try: import tensorflow.saved_model except Exception: - tensorflow = None + _tensorflow = None from bioimageio.spec._internal.io_utils import download from bioimageio.spec.model.v0_5 import ModelDescr @@ -35,7 +35,7 @@ def _zip_model_bundle(model_bundle_folder: Path): def _convert_tf1(keras_weight_path: Path, output_path: Path, input_name: str, output_name: str, zip_weights: bool): try: # try to build the tf model with the keras import from tensorflow - from tensorflow import keras # type: ignore + from bioimageio.core.weight_converter.keras._tensorflow import keras # type: ignore except Exception: # if the above fails try to export with the standalone keras @@ -44,17 +44,17 @@ def _convert_tf1(keras_weight_path: Path, output_path: Path, input_name: str, ou @no_type_check def build_tf_model(): keras_model = keras.models.load_model(keras_weight_path) - assert tensorflow is not None - builder = tensorflow.saved_model.builder.SavedModelBuilder(output_path) - signature = tensorflow.saved_model.signature_def_utils.predict_signature_def( + assert _tensorflow is not None + builder = _tensorflow.saved_model.builder.SavedModelBuilder(output_path) + signature = _tensorflow.saved_model.signature_def_utils.predict_signature_def( inputs={input_name: keras_model.input}, outputs={output_name: keras_model.output} ) - signature_def_map = {tensorflow.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature} + signature_def_map = {_tensorflow.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature} builder.add_meta_graph_and_variables( keras.backend.get_session(), - [tensorflow.saved_model.tag_constants.SERVING], + [_tensorflow.saved_model.tag_constants.SERVING], signature_def_map=signature_def_map, ) builder.save() @@ -71,7 +71,7 @@ def build_tf_model(): def _convert_tf2(keras_weight_path: Path, output_path: Path, zip_weights: bool): try: # try to build the tf model with the keras import from tensorflow - from tensorflow import keras + from bioimageio.core.weight_converter.keras._tensorflow import keras except Exception: # if the above fails try to export with the standalone keras import keras @@ -96,8 +96,8 @@ def convert_weights_to_tensorflow_saved_model_bundle(model: ModelDescr, output_p model: The bioimageio model description output_path: where to save the tensorflow weights. This path must not exist yet. 
""" - assert tensorflow is not None - tf_major_ver = int(tensorflow.__version__.split(".")[0]) + assert _tensorflow is not None + tf_major_ver = int(_tensorflow.__version__.split(".")[0]) if output_path.suffix == ".zip": output_path = output_path.with_suffix("") diff --git a/bioimageio/core/weight_converter/torch/__init__.py b/bioimageio/core/weight_converter/torch/__init__.py index c7bda015..1b1ba526 100644 --- a/bioimageio/core/weight_converter/torch/__init__.py +++ b/bioimageio/core/weight_converter/torch/__init__.py @@ -1,2 +1 @@ -from .onnx import add_onnx_weights -from .torchscript import convert_weights_to_torchscript +# TODO: torch weight converters diff --git a/bioimageio/core/weight_converter/torch/onnx.py b/bioimageio/core/weight_converter/torch/_onnx.py similarity index 97% rename from bioimageio/core/weight_converter/torch/onnx.py rename to bioimageio/core/weight_converter/torch/_onnx.py index 9fa90de1..f9b66b9f 100644 --- a/bioimageio/core/weight_converter/torch/onnx.py +++ b/bioimageio/core/weight_converter/torch/_onnx.py @@ -7,7 +7,7 @@ from numpy.testing import assert_array_almost_equal from bioimageio.core.utils import get_test_inputs -from bioimageio.core.weight_converter.torch.utils import load_torch_model +from bioimageio.core.weight_converter.torch._utils import load_torch_model from bioimageio.spec import load_description from bioimageio.spec.common import InvalidDescr from bioimageio.spec.model import v0_4, v0_5 diff --git a/bioimageio/core/weight_converter/torch/torchscript.py b/bioimageio/core/weight_converter/torch/_torchscript.py similarity index 99% rename from bioimageio/core/weight_converter/torch/torchscript.py rename to bioimageio/core/weight_converter/torch/_torchscript.py index 0dd23442..e724dac2 100644 --- a/bioimageio/core/weight_converter/torch/torchscript.py +++ b/bioimageio/core/weight_converter/torch/_torchscript.py @@ -9,7 +9,7 @@ from bioimageio.spec.model import v0_4, v0_5 from bioimageio.spec.model.v0_5 import Version -from .utils import load_torch_model +from ._utils import load_torch_model # FIXME: remove Any diff --git a/bioimageio/core/weight_converter/torch/utils.py b/bioimageio/core/weight_converter/torch/_utils.py similarity index 100% rename from bioimageio/core/weight_converter/torch/utils.py rename to bioimageio/core/weight_converter/torch/_utils.py diff --git a/tests/conftest.py b/tests/conftest.py index dcf8e8d5..2355c48c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -112,28 +112,27 @@ skip_tensorflow = tensorflow is None skip_tensorflow_js = True # TODO: add a tensorflow_js example model +# load all model packages we need for testing +load_model_packages: Set[str] = set() +if not skip_torch: + load_model_packages |= set(TORCH_MODELS + TORCHSCRIPT_MODELS) + +if not skip_onnx: + load_model_packages |= set(ONNX_MODELS) + +if not skip_tensorflow: + load_model_packages |= set(TENSORFLOW_JS_MODELS) + if tf_major_version == 1: + load_model_packages |= set(KERAS_TF1_MODELS) + load_model_packages |= set(TENSORFLOW1_MODELS) + load_model_packages.add("stardist_wrong_shape") + load_model_packages.add("stardist_wrong_shape2") + elif tf_major_version == 2: + load_model_packages |= set(KERAS_TF2_MODELS) + load_model_packages |= set(TENSORFLOW2_MODELS) @fixture(scope="session") def model_packages() -> MappingProxyType[str, FilePath]: - # load all model packages we need for testing - load_model_packages: Set[str] = set() - if not skip_torch: - load_model_packages |= set(TORCH_MODELS + TORCHSCRIPT_MODELS) - - if not skip_onnx: - 
load_model_packages |= set(ONNX_MODELS) - - if not skip_tensorflow: - load_model_packages |= set(TENSORFLOW_JS_MODELS) - if tf_major_version == 1: - load_model_packages |= set(KERAS_TF1_MODELS) - load_model_packages |= set(TENSORFLOW1_MODELS) - load_model_packages.add("stardist_wrong_shape") - load_model_packages.add("stardist_wrong_shape2") - elif tf_major_version == 2: - load_model_packages |= set(KERAS_TF2_MODELS) - load_model_packages |= set(TENSORFLOW2_MODELS) - return MappingProxyType({name: save_bioimageio_package(MODEL_SOURCES[name]) for name in load_model_packages}) diff --git a/tests/test_cli.py b/tests/test_cli.py index f821e0c9..d70f21d8 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,9 +1,10 @@ import os import subprocess from pathlib import Path -from typing import Any, List, Optional, Sequence +from typing import Any, List, Optional, Sequence, Set import numpy as np +import pytest from bioimageio.core import load_description @@ -12,130 +13,108 @@ def run_subprocess(commands: Sequence[str], **kwargs: Any) -> "subprocess.Comple return subprocess.run(commands, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8", **kwargs) -def test_validate_model(unet2d_nuclei_broad_model: Path): - ret = run_subprocess(["bioimageio", "validate", unet2d_nuclei_broad_model]) - assert ret.returncode == 0, ret.stdout +FIXTURES = {"unet2d_nuclei_broad_model"} -def test_cli_package(unet2d_nuclei_broad_model: Path, tmp_path: Path): - out_path = tmp_path / "model.zip" - ret = run_subprocess(["bioimageio", "package", unet2d_nuclei_broad_model, str(out_path)]) +@pytest.mark.parametrize( + "args", + [ + ["package", "unet2d_nuclei_broad_model", "--weight-format", "pytorch_state_dict"], + ["package", "unet2d_nuclei_broad_model"], + ["test-model", "unet2d_nuclei_broad_model", "--weight-format", "pytorch_state_dict"], + ["test-model", "unet2d_nuclei_broad_model"], + ], +) +def test_cli(args: List[str], request: pytest.FixtureRequest): + resolved_args = [str(request.getfixturevalue(arg)) if arg in FIXTURES else arg for arg in args] + ret = run_subprocess(["bioimageio", *resolved_args]) assert ret.returncode == 0, ret.stdout - assert out_path.exists() -def test_cli_package_wo_cache(unet2d_nuclei_broad_model: Path): - env = os.environ.copy() - env["BIOIMAGEIO_USE_CACHE"] = "false" - ret = run_subprocess(["bioimageio", "package", unet2d_nuclei_broad_model], env=env) - assert ret.returncode == 0, ret.stdout +@pytest.mark.parametrize("args", [["test-model", "stardist_wrong_shape"]]) +def test_cli_fails(args: List[str], request: pytest.FixtureRequest): + resolved_args = [str(request.getfixturevalue(arg)) if arg in FIXTURES else arg for arg in args] + ret = run_subprocess(["bioimageio", *resolved_args]) + assert ret.returncode == 1, ret.stdout -def test_cli_test_model(unet2d_nuclei_broad_model: Path): - ret = run_subprocess(["bioimageio", "test-model", unet2d_nuclei_broad_model]) - assert ret.returncode == 0, ret.stdout +# TODO: update CLI test +# def _test_cli_predict_image(model: Path, tmp_path: Path, extra_cmd_args: Optional[List[str]] = None): +# spec = load_description(model) +# in_path = spec.test_inputs[0] +# out_path = tmp_path.with_suffix(".npy") +# cmd = ["bioimageio", "predict-image", model, "--input", str(in_path), "--output", str(out_path)] +# if extra_cmd_args is not None: +# cmd.extend(extra_cmd_args) +# ret = run_subprocess(cmd) +# assert ret.returncode == 0, ret.stdout +# assert out_path.exists() -def test_cli_test_model_fail(stardist_wrong_shape: Path): - ret = 
run_subprocess(["bioimageio", "test-model", stardist_wrong_shape]) - assert ret.returncode == 1 +# def test_cli_predict_image(unet2d_nuclei_broad_model: Path, tmp_path: Path): +# _test_cli_predict_image(unet2d_nuclei_broad_model, tmp_path) -def test_cli_test_model_with_weight_format(unet2d_nuclei_broad_model: Path): - ret = run_subprocess( - ["bioimageio", "test-model", unet2d_nuclei_broad_model, "--weight-format", "pytorch_state_dict"] - ) - assert ret.returncode == 0, ret.stdout +# def test_cli_predict_image_with_weight_format(unet2d_nuclei_broad_model: Path, tmp_path: Path): +# _test_cli_predict_image(unet2d_nuclei_broad_model, tmp_path, ["--weight-format", "pytorch_state_dict"]) -def test_cli_test_resource(unet2d_nuclei_broad_model: Path): - ret = run_subprocess(["bioimageio", "test-resource", unet2d_nuclei_broad_model]) - assert ret.returncode == 0, ret.stdout +# def _test_cli_predict_images(model: Path, tmp_path: Path, extra_cmd_args: Optional[List[str]] = None): +# n_images = 3 +# shape = (1, 1, 128, 128) +# expected_shape = (1, 1, 128, 128) -def test_cli_test_resource_with_weight_format(unet2d_nuclei_broad_model: Path): - ret = run_subprocess( - ["bioimageio", "test-resource", unet2d_nuclei_broad_model, "--weight-format", "pytorch_state_dict"] - ) - assert ret.returncode == 0, ret.stdout +# in_folder = tmp_path / "inputs" +# in_folder.mkdir() +# out_folder = tmp_path / "outputs" +# out_folder.mkdir() +# expected_outputs: List[Path] = [] +# for i in range(n_images): +# path = in_folder / f"im-{i}.npy" +# im = np.random.randint(0, 255, size=shape).astype("uint8") +# np.save(path, im) +# expected_outputs.append(out_folder / f"im-{i}.npy") -def _test_cli_predict_image(model: Path, tmp_path: Path, extra_cmd_args: Optional[List[str]] = None): - spec = load_description(model) - in_path = spec.test_inputs[0] +# input_pattern = str(in_folder / "*.npy") +# cmd = ["bioimageio", "predict-images", str(model), input_pattern, str(out_folder)] +# if extra_cmd_args is not None: +# cmd.extend(extra_cmd_args) +# ret = run_subprocess(cmd) +# assert ret.returncode == 0, ret.stdout - out_path = tmp_path.with_suffix(".npy") - cmd = ["bioimageio", "predict-image", model, "--input", str(in_path), "--output", str(out_path)] - if extra_cmd_args is not None: - cmd.extend(extra_cmd_args) - ret = run_subprocess(cmd) - assert ret.returncode == 0, ret.stdout - assert out_path.exists() - - -def test_cli_predict_image(unet2d_nuclei_broad_model: Path, tmp_path: Path): - _test_cli_predict_image(unet2d_nuclei_broad_model, tmp_path) +# for out_path in expected_outputs: +# assert out_path.exists() +# assert np.load(out_path).shape == expected_shape -def test_cli_predict_image_with_weight_format(unet2d_nuclei_broad_model: Path, tmp_path: Path): - _test_cli_predict_image(unet2d_nuclei_broad_model, tmp_path, ["--weight-format", "pytorch_state_dict"]) +# def test_cli_predict_images(unet2d_nuclei_broad_model: Path, tmp_path: Path): +# _test_cli_predict_images(unet2d_nuclei_broad_model, tmp_path) -def _test_cli_predict_images(model: Path, tmp_path: Path, extra_cmd_args: Optional[List[str]] = None): - n_images = 3 - shape = (1, 1, 128, 128) - expected_shape = (1, 1, 128, 128) +# def test_cli_predict_images_with_weight_format(unet2d_nuclei_broad_model: Path, tmp_path: Path): +# _test_cli_predict_images(unet2d_nuclei_broad_model, tmp_path, ["--weight-format", "pytorch_state_dict"]) - in_folder = tmp_path / "inputs" - in_folder.mkdir() - out_folder = tmp_path / "outputs" - out_folder.mkdir() - expected_outputs: List[Path] = [] - 
for i in range(n_images): - path = in_folder / f"im-{i}.npy" - im = np.random.randint(0, 255, size=shape).astype("uint8") - np.save(path, im) - expected_outputs.append(out_folder / f"im-{i}.npy") - - input_pattern = str(in_folder / "*.npy") - cmd = ["bioimageio", "predict-images", str(model), input_pattern, str(out_folder)] - if extra_cmd_args is not None: - cmd.extend(extra_cmd_args) - ret = run_subprocess(cmd) - assert ret.returncode == 0, ret.stdout +# def test_torch_to_torchscript(unet2d_nuclei_broad_model: Path, tmp_path: Path): +# out_path = tmp_path.with_suffix(".pt") +# ret = run_subprocess( +# ["bioimageio", "convert-torch-weights-to-torchscript", str(unet2d_nuclei_broad_model), str(out_path)] +# ) +# assert ret.returncode == 0, ret.stdout +# assert out_path.exists() - for out_path in expected_outputs: - assert out_path.exists() - assert np.load(out_path).shape == expected_shape +# def test_torch_to_onnx(convert_to_onnx: Path, tmp_path: Path): +# out_path = tmp_path.with_suffix(".onnx") +# ret = run_subprocess(["bioimageio", "convert-torch-weights-to-onnx", str(convert_to_onnx), str(out_path)]) +# assert ret.returncode == 0, ret.stdout +# assert out_path.exists() -def test_cli_predict_images(unet2d_nuclei_broad_model: Path, tmp_path: Path): - _test_cli_predict_images(unet2d_nuclei_broad_model, tmp_path) - -def test_cli_predict_images_with_weight_format(unet2d_nuclei_broad_model: Path, tmp_path: Path): - _test_cli_predict_images(unet2d_nuclei_broad_model, tmp_path, ["--weight-format", "pytorch_state_dict"]) - - -def test_torch_to_torchscript(unet2d_nuclei_broad_model: Path, tmp_path: Path): - out_path = tmp_path.with_suffix(".pt") - ret = run_subprocess( - ["bioimageio", "convert-torch-weights-to-torchscript", str(unet2d_nuclei_broad_model), str(out_path)] - ) - assert ret.returncode == 0, ret.stdout - assert out_path.exists() - - -def test_torch_to_onnx(convert_to_onnx: Path, tmp_path: Path): - out_path = tmp_path.with_suffix(".onnx") - ret = run_subprocess(["bioimageio", "convert-torch-weights-to-onnx", str(convert_to_onnx), str(out_path)]) - assert ret.returncode == 0, ret.stdout - assert out_path.exists() - - -def test_keras_to_tf(unet2d_keras: Path, tmp_path: Path): - out_path = tmp_path / "weights.zip" - ret = run_subprocess(["bioimageio", "convert-keras-weights-to-tensorflow", str(unet2d_keras), str(out_path)]) - assert ret.returncode == 0, ret.stdout - assert out_path.exists() +# def test_keras_to_tf(unet2d_keras: Path, tmp_path: Path): +# out_path = tmp_path / "weights.zip" +# ret = run_subprocess(["bioimageio", "convert-keras-weights-to-tensorflow", str(unet2d_keras), str(out_path)]) +# assert ret.returncode == 0, ret.stdout +# assert out_path.exists() diff --git a/tests/test_prediction_pipeline.py b/tests/test_prediction_pipeline.py index d4b16c12..5347380a 100644 --- a/tests/test_prediction_pipeline.py +++ b/tests/test_prediction_pipeline.py @@ -9,7 +9,7 @@ def _test_prediction_pipeline(model_package: Path, weights_format: WeightsFormat): - from bioimageio.core.prediction_pipeline import create_prediction_pipeline + from bioimageio.core._prediction_pipeline import create_prediction_pipeline bio_model = load_description(model_package) assert isinstance(bio_model, (ModelDescr, ModelDescr04)) diff --git a/tests/test_prediction_pipeline_device_management.py b/tests/test_prediction_pipeline_device_management.py index bda4af08..16354d18 100644 --- a/tests/test_prediction_pipeline_device_management.py +++ b/tests/test_prediction_pipeline_device_management.py @@ -21,7 +21,7 @@ 
def _test_device_management(model_package: Path, weight_format: WeightsFormat): if torch.cuda.device_count() == 0: raise TooFewDevicesException("Need at least one cuda device for this test") - from bioimageio.core.prediction_pipeline import create_prediction_pipeline + from bioimageio.core._prediction_pipeline import create_prediction_pipeline bio_model = load_description(model_package) assert isinstance(bio_model, (ModelDescr, ModelDescr04)) diff --git a/tests/test_resource_tests.py b/tests/test_resource_tests.py index 970bf2e2..9f69721a 100644 --- a/tests/test_resource_tests.py +++ b/tests/test_resource_tests.py @@ -4,7 +4,7 @@ def test_error_for_wrong_shape(stardist_wrong_shape: Path): - from bioimageio.core.resource_tests import test_model + from bioimageio.core._resource_tests import test_model summary = test_model(stardist_wrong_shape) expected_error_message = ( @@ -16,7 +16,7 @@ def test_error_for_wrong_shape(stardist_wrong_shape: Path): def test_error_for_wrong_shape2(stardist_wrong_shape2: Path): - from bioimageio.core.resource_tests import test_model + from bioimageio.core._resource_tests import test_model summary = test_model(stardist_wrong_shape2) expected_error_message = ( @@ -27,14 +27,14 @@ def test_error_for_wrong_shape2(stardist_wrong_shape2: Path): def test_test_model(any_model: Path): - from bioimageio.core.resource_tests import test_model + from bioimageio.core._resource_tests import test_model summary = test_model(any_model) assert summary.status == "passed" def test_test_resource(any_model: Path): - from bioimageio.core.resource_tests import test_description + from bioimageio.core._resource_tests import test_description summary = test_description(any_model) assert summary.status == "passed" @@ -42,7 +42,7 @@ def test_test_resource(any_model: Path): def test_validation_section_warning(unet2d_nuclei_broad_model: str, tmp_path: Path): from bioimageio.core import load_description - from bioimageio.core.resource_tests import test_description + from bioimageio.core._resource_tests import test_description model = load_description(unet2d_nuclei_broad_model) assert not isinstance(model, InvalidDescr) @@ -65,7 +65,7 @@ def test_issue289(unet2d_nuclei_broad_model: str): # remote model is a pytorch model, needing unet2d_nuclei_broad_model skips the test when needed _ = unet2d_nuclei_broad_model - from bioimageio.core.resource_tests import test_model + from bioimageio.core._resource_tests import test_model doi = "10.5281/zenodo.6287342" summary = test_model(doi) diff --git a/tests/test_image_helper.py b/tests/utils/test_image_helper.py similarity index 84% rename from tests/test_image_helper.py rename to tests/utils/test_image_helper.py index d9721fc2..8e86a919 100644 --- a/tests/test_image_helper.py +++ b/tests/utils/test_image_helper.py @@ -2,7 +2,7 @@ def test_transform_input_image(): - from bioimageio.core.image_helper import transpose_image + from bioimageio.core.utils.image_helper import transpose_image ax_list = ["yx", "xy", "cyx", "yxc", "bczyx", "xyz", "xyzc", "bzyxc"] im = np.random.rand(256, 256) @@ -18,7 +18,7 @@ def test_transform_input_image(): def test_transform_output_tensor(): - from bioimageio.core.image_helper import transform_output_tensor + from bioimageio.core.utils.image_helper import transform_output_tensor tensor = np.random.rand(1, 3, 64, 64, 64) tensor_axes = "bczyx" diff --git a/tests/weight_converter/keras/test_tensorflow.py b/tests/weight_converter/keras/test_tensorflow.py index 5cc7f297..6cc42c57 100644 --- 
a/tests/weight_converter/keras/test_tensorflow.py +++ b/tests/weight_converter/keras/test_tensorflow.py @@ -1,10 +1,13 @@ import zipfile from pathlib import Path +import pytest + from bioimageio.spec import load_description from bioimageio.spec.model.v0_5 import ModelDescr +@pytest.mark.skip("tensorflow converter not updated yet") # TODO: test tensorflow converter def test_tensorflow_converter(any_keras_model: Path, tmp_path: Path): from bioimageio.core.weight_converter.keras import convert_weights_to_tensorflow_saved_model_bundle @@ -18,6 +21,7 @@ def test_tensorflow_converter(any_keras_model: Path, tmp_path: Path): assert ret_val == 0 # check for correctness is done in converter and returns 0 if it passes +@pytest.mark.skip("tensorflow converter not updated yet") # TODO: test tensorflow converter def test_tensorflow_converter_zipped(any_keras_model: Path, tmp_path: Path): from bioimageio.core.weight_converter.keras import convert_weights_to_tensorflow_saved_model_bundle diff --git a/tests/weight_converter/test_add_weights.py b/tests/weight_converter/test_add_weights.py index e3df4b80..836353c7 100644 --- a/tests/weight_converter/test_add_weights.py +++ b/tests/weight_converter/test_add_weights.py @@ -1,47 +1,48 @@ -import os +# TODO: update add weights tests +# import os -def _test_add_weights(model, tmp_path, base_weights, added_weights, **kwargs): - from bioimageio.core.build_spec import add_weights +# def _test_add_weights(model, tmp_path, base_weights, added_weights, **kwargs): +# from bioimageio.core.build_spec import add_weights - rdf = load_raw_resource_description(model) - assert base_weights in rdf.weights - assert added_weights in rdf.weights +# rdf = load_raw_resource_description(model) +# assert base_weights in rdf.weights +# assert added_weights in rdf.weights - weight_path = load_description(model).weights[added_weights].source - assert weight_path.exists() +# weight_path = load_description(model).weights[added_weights].source +# assert weight_path.exists() - drop_weights = set(rdf.weights.keys()) - {base_weights} - for drop in drop_weights: - rdf.weights.pop(drop) - assert tuple(rdf.weights.keys()) == (base_weights,) +# drop_weights = set(rdf.weights.keys()) - {base_weights} +# for drop in drop_weights: +# rdf.weights.pop(drop) +# assert tuple(rdf.weights.keys()) == (base_weights,) - in_path = tmp_path / "model1.zip" - export_resource_package(rdf, output_path=in_path) +# in_path = tmp_path / "model1.zip" +# export_resource_package(rdf, output_path=in_path) - out_path = tmp_path / "model2.zip" - add_weights(in_path, weight_path, weight_type=added_weights, output_path=out_path, **kwargs) +# out_path = tmp_path / "model2.zip" +# add_weights(in_path, weight_path, weight_type=added_weights, output_path=out_path, **kwargs) - assert out_path.exists() - new_rdf = load_description(out_path) - assert set(new_rdf.weights.keys()) == {base_weights, added_weights} - for weight in new_rdf.weights.values(): - assert weight.source.exists() +# assert out_path.exists() +# new_rdf = load_description(out_path) +# assert set(new_rdf.weights.keys()) == {base_weights, added_weights} +# for weight in new_rdf.weights.values(): +# assert weight.source.exists() - test_res = _test_model(out_path, added_weights) - failed = [s for s in test_res if s["status"] != "passed"] - assert not failed, failed - test_res = _test_model(out_path) - failed = [s for s in test_res if s["status"] != "passed"] - assert not failed, failed +# test_res = _test_model(out_path, added_weights) +# failed = [s for s in test_res 
if s["status"] != "passed"] +# assert not failed, failed +# test_res = _test_model(out_path) +# failed = [s for s in test_res if s["status"] != "passed"] +# assert not failed, failed - # make sure the weights were cleaned from the cwd - assert not os.path.exists(os.path.split(weight_path)[1]) +# # make sure the weights were cleaned from the cwd +# assert not os.path.exists(os.path.split(weight_path)[1]) -def test_add_torchscript(unet2d_nuclei_broad_model, tmp_path): - _test_add_weights(unet2d_nuclei_broad_model, tmp_path, "pytorch_state_dict", "torchscript") +# def test_add_torchscript(unet2d_nuclei_broad_model, tmp_path): +# _test_add_weights(unet2d_nuclei_broad_model, tmp_path, "pytorch_state_dict", "torchscript") -def test_add_onnx(unet2d_nuclei_broad_model, tmp_path): - _test_add_weights(unet2d_nuclei_broad_model, tmp_path, "pytorch_state_dict", "onnx", opset_version=12) +# def test_add_onnx(unet2d_nuclei_broad_model, tmp_path): +# _test_add_weights(unet2d_nuclei_broad_model, tmp_path, "pytorch_state_dict", "onnx", opset_version=12) diff --git a/tests/weight_converter/torch/test_onnx.py b/tests/weight_converter/torch/test_onnx.py index bc757806..c2efbcd8 100644 --- a/tests/weight_converter/torch/test_onnx.py +++ b/tests/weight_converter/torch/test_onnx.py @@ -4,9 +4,9 @@ import pytest -# todo: test with 'any_torch_model' -def test_onnx_converter(convert_to_onnx: Path, tmp_path, Path): - from bioimageio.core.weight_converter.torch.onnx import convert_weights_to_onnx +@pytest.mark.skip("onnx converter not updated yet") # TODO: test onnx converter +def test_onnx_converter(convert_to_onnx: Path, tmp_path: Path): + from bioimageio.core.weight_converter.torch._onnx import convert_weights_to_onnx out_path = tmp_path / "weights.onnx" ret_val = convert_weights_to_onnx(convert_to_onnx, out_path, test_decimal=3) diff --git a/tests/weight_converter/torch/test_torchscript.py b/tests/weight_converter/torch/test_torchscript.py index 2c1e47d2..e3f6e42c 100644 --- a/tests/weight_converter/torch/test_torchscript.py +++ b/tests/weight_converter/torch/test_torchscript.py @@ -1,7 +1,11 @@ from pathlib import Path + +import pytest + from bioimageio.spec.model import v0_4, v0_5 +@pytest.mark.skip("torchscript converter not updated yet") # TODO: test torchscript converter def test_torchscript_converter(any_torch_model: "v0_4.ModelDescr | v0_5.ModelDescr", tmp_path: Path): from bioimageio.core.weight_converter.torch import convert_weights_to_torchscript