add model inference tests for various test input sizes
FynnBe committed Mar 7, 2024
1 parent 6f1c58c commit 92d4373
Showing 12 changed files with 120 additions and 110 deletions.
7 changes: 1 addition & 6 deletions bioimageio/core/__init__.py
@@ -4,12 +4,6 @@

import json

from bioimageio.core.utils import files

with files("bioimageio.core").joinpath("VERSION").open("r", encoding="utf-8") as f:
__version__: str = json.load(f)["version"]
assert isinstance(__version__, str)

from bioimageio.spec import build_description as build_description
from bioimageio.spec import dump_description as dump_description
from bioimageio.spec import load_description as load_description
@@ -24,5 +18,6 @@
from ._resource_tests import load_description_and_test as load_description_and_test
from ._resource_tests import test_description as test_description
from ._resource_tests import test_model as test_model
from .utils import VERSION as __version__

test_resource = test_description
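
A minimal sketch (not part of the commit) of how the version string is resolved after this change, assuming the packaged VERSION file is available at runtime: the value is parsed once in bioimageio.core.utils and re-exported from the package root.

# sketch only: both names refer to the same parsed string
from bioimageio.core import __version__  # re-exported via `from .utils import VERSION as __version__`
from bioimageio.core.utils import VERSION  # parsed from the packaged VERSION json file

assert __version__ == VERSION
print(f"bioimageio.core {__version__}")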
4 changes: 2 additions & 2 deletions bioimageio/core/__main__.py
@@ -63,8 +63,8 @@ def package(
"--weights-priority-order",
"-wpo",
help="For model packages only. "
"If given, only the first matching weights entry is included. "
"Defaults to including all weights present in source.",
+ "If given, only the first matching weights entry is included. "
+ "Defaults to including all weights present in source.",
show_default=False,
),
] = None,
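
The help-string change above replaces implicit adjacent-literal concatenation with an explicit "+", which is what the new reportImplicitStringConcatenation setting in pyproject.toml (further down) warns about. A small illustration, not taken from the repository:

# flagged by reportImplicitStringConcatenation: adjacent literals joined implicitly
help_text = (
    "For model packages only. "
    "If given, only the first matching weights entry is included."
)

# accepted: the join is spelled out with "+"
help_text = (
    "For model packages only. "
    + "If given, only the first matching weights entry is included."
)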
118 changes: 74 additions & 44 deletions bioimageio/core/_resource_tests.py
@@ -1,43 +1,45 @@
import traceback
import warnings
from typing import List, Literal, Optional, Union
from typing import List, Literal, Optional, Sequence, Set, Tuple, Union

import numpy as np
import xarray as xr

from bioimageio.core import __version__ as bioimageio_core_version
from bioimageio.core import create_prediction_pipeline, PredictionPipeline
from bioimageio.core._prediction_pipeline import create_prediction_pipeline
from bioimageio.core.common import AxisId, BatchSize
from bioimageio.core.utils import VERSION
from bioimageio.core.utils.image_helper import pad_to
from bioimageio.spec import InvalidDescr, ResourceDescr, build_description, dump_description, load_description
from bioimageio.spec._internal.common_nodes import ResourceDescrBase
from bioimageio.spec._internal.io_utils import load_array
from bioimageio.spec.common import BioimageioYamlContent, FileSource
from bioimageio.spec.common import BioimageioYamlContent, PermissiveFileSource
from bioimageio.spec.model import v0_4, v0_5
from bioimageio.spec.model.v0_5 import WeightsFormat
from bioimageio.spec.summary import ErrorEntry, InstalledPackage, ValidationDetail, ValidationSummary


def test_model(
source: FileSource,
source: PermissiveFileSource,
weight_format: Optional[WeightsFormat] = None,
devices: Optional[List[str]] = None,
decimal: int = 4,
) -> ValidationSummary:
"""Test whether the test output(s) of a model can be reproduced."""
"""Test model inference"""
return test_description(
source, weight_format=weight_format, devices=devices, decimal=decimal, expected_type="model"
)


def test_description(
source: Union[ResourceDescr, FileSource, BioimageioYamlContent],
source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
*,
format_version: Union[Literal["discover", "latest"], str] = "discover",
weight_format: Optional[WeightsFormat] = None,
devices: Optional[List[str]] = None,
decimal: int = 4,
expected_type: Optional[str] = None,
) -> ValidationSummary:
"""Test RDF dynamically, e.g. model inference of test inputs"""
"""Test a bioimage.io resource dynamically, e.g. prediction of test tensors for models"""
rd = load_description_and_test(
source,
format_version=format_version,
@@ -50,7 +52,7 @@ def test_description(


def load_description_and_test(
source: Union[ResourceDescr, FileSource, BioimageioYamlContent],
source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
*,
format_version: Union[Literal["discover", "latest"], str] = "discover",
weight_format: Optional[WeightsFormat] = None,
@@ -74,20 +76,24 @@ def load_description_and_test(
else:
rd = load_description(source, format_version=format_version)

rd.validation_summary.env.append(InstalledPackage(name="bioimageio.core", version=bioimageio_core_version))
rd.validation_summary.env.append(InstalledPackage(name="bioimageio.core", version=VERSION))

if expected_type is not None:
_test_expected_resource_type(rd, expected_type)

if isinstance(rd, (v0_4.ModelDescr, v0_5.ModelDescr)):
_test_model_inference(rd, weight_format, devices, decimal)
if not isinstance(rd, v0_4.ModelDescr):
_test_model_inference_with_parametrized_inputs(rd, weight_format, devices)
if isinstance(rd, v0_4.ModelDescr):
_test_model_inference_v0_4(rd, weight_format, devices, decimal)
else:
_test_model_inference_impl(rd, weight_format, devices)

# TODO: add execution of jupyter notebooks
# TODO: add more tests

return rd


def _test_model_inference(
def _test_model_inference_v0_4(
model: Union[v0_4.ModelDescr, v0_5.ModelDescr],
weight_format: Optional[WeightsFormat],
devices: Optional[List[str]],
@@ -115,9 +121,7 @@ def _test_model_inference(
results = prediction_pipeline.forward(*inputs)

if len(results) != len(expected):
error = (error or "") + (
f"Expected {len(expected)} outputs, but got {len(results)}"
)
error = (error or "") + (f"Expected {len(expected)} outputs, but got {len(results)}")
else:
for res, exp in zip(results, expected):
try:
@@ -147,65 +151,91 @@ def _test_model_inference(
)
)

def _test_model_inference_with_parametrized_inputs(

def _test_model_inference_impl(
model: v0_5.ModelDescr,
weight_format: Optional[WeightsFormat],
devices: Optional[List[str]],
test_cases: Sequence[Tuple[v0_5.ParameterizedSize.N, BatchSize]] = ((0, 1), (1, 3), (2, 1), (3, 2)),
) -> None:
if not any(isinstance(a.size, v0_5.ParameterizedSize) for ipt in model.inputs for a in ipt.axes):
return

error: Optional[str] = None
tb: List[str] = []
try:
test_inputs = [
xr.DataArray(load_array(d.test_tensor.download().path), dims=tuple(a.id for a in d.axes))
for d in model.inputs
]
def generate_test_cases():
for n in [0, 1, 2, 3]:

def generate_test_cases():
tested: Set[str] = set()
for n, batch_size in test_cases:
target_sizes = model.get_tensor_sizes(n, batch_size=batch_size)
hashable_target_size = str(target_sizes)
if hashable_target_size in tested:
continue
else:
tested.add(hashable_target_size)

resized_test_inputs = [
pad_to(t, target_sizes[t_descr.id]) for t, t_descr in zip(test_inputs, model.inputs)
]
expected_output_shapes = [target_sizes[t_descr.id] for t_descr in model.outputs]
yield n, batch_size, resized_test_inputs, expected_output_shapes

with create_prediction_pipeline(
bioimageio_model=model, devices=devices, weight_format=weight_format
) as prediction_pipeline:
for n, inputs, exptected_output_shape in generate_test_cases():
results = prediction_pipeline.forward(*inputs)

for n, batch_size, inputs, exptected_output_shape in generate_test_cases():
error: Optional[str] = None
results = prediction_pipeline.forward(*inputs)
if len(results) != len(exptected_output_shape):
error = (error or "") + (
f"Expected {len(exptected_output_shape)} outputs, but got {len(results)}"
)
error = (error or "") + (f"Expected {len(exptected_output_shape)} outputs, but got {len(results)}")
else:
for res, exp in zip(results, exptected_output_shape):
if res.shape != exp:
error = (error or "") + f"(n={n}) Expected output shape {exptected_output_shape}, but got {res.shape}\n"

if error:
break
if diff := {a: s for a, s in res.sizes.items() if s != exp[AxisId(str(a))]}:
error = (
(error or "")
+ f"(n={n}) Expected output shape {exp},"
+ f" but got {exptected_output_shape} ({diff})\n"
)

model.validation_summary.add_detail(
ValidationDetail(
name="Reproduce test outputs from test inputs with batch_size:"
+ f" {batch_size} and size parameter n: {n}",
status="passed" if error is None else "failed",
errors=(
[]
if error is None
else [
ErrorEntry(
loc=("weights",) if weight_format is None else ("weights", weight_format),
msg=error,
type="bioimageio.core",
)
]
),
)
)
except Exception as e:
error = str(e)
tb = traceback.format_tb(e.__traceback__)

model.validation_summary.add_detail(
ValidationDetail(
name="Reproduce test outputs from test inputs",
status="passed" if error is None else "failed",
errors=(
[]
if error is None
else [
model.validation_summary.add_detail(
ValidationDetail(
name="Reproduce test outputs from test inputs",
status="failed",
errors=[
ErrorEntry(
loc=("weights",) if weight_format is None else ("weights", weight_format),
msg=error,
type="bioimageio.core",
traceback=tb,
)
]
),
],
)
)
)


def _test_expected_resource_type(rd: Union[InvalidDescr, ResourceDescr], expected_type: str):
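
To summarize the new test flow in isolation: for v0.5 models, several (n, batch_size) pairs are expanded to concrete tensor sizes via model.get_tensor_sizes, cases that resolve to identical sizes are skipped, the test inputs are padded to the target sizes, and each remaining case is reported as its own validation detail. Below is a rough sketch of the de-duplicating generator only; get_sizes is a hypothetical stand-in for model.get_tensor_sizes and the nested dict return type is an assumption for illustration.

from typing import Callable, Dict, Iterator, Sequence, Set, Tuple

def unique_test_cases(
    cases: Sequence[Tuple[int, int]],  # (n, batch_size) pairs, e.g. ((0, 1), (1, 3), (2, 1), (3, 2))
    get_sizes: Callable[[int, int], Dict[str, Dict[str, int]]],  # stand-in for model.get_tensor_sizes
) -> Iterator[Tuple[int, int, Dict[str, Dict[str, int]]]]:
    tested: Set[str] = set()
    for n, batch_size in cases:
        target_sizes = get_sizes(n, batch_size)
        key = str(target_sizes)  # the nested size mapping is unhashable, so its repr serves as a set key
        if key in tested:
            continue  # an earlier (n, batch_size) pair already produced these exact sizes
        tested.add(key)
        yield n, batch_size, target_sizes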
2 changes: 1 addition & 1 deletion bioimageio/core/common.py
@@ -10,7 +10,7 @@

TensorId = v0_5.TensorId
AxisId = v0_5.AxisId

BatchSize = int
Tensor = xr.DataArray

Data = Dict[TensorId, Tensor]
3 changes: 0 additions & 3 deletions bioimageio/core/model_adapters/_onnx_model_adapter.py
@@ -1,4 +1,3 @@
import logging
import warnings
from typing import Any, List, Optional, Sequence, Union

@@ -14,8 +13,6 @@
except Exception:
rt = None

logger = logging.getLogger(__name__)


class ONNXModelAdapter(ModelAdapter):
def __init__(
6 changes: 6 additions & 0 deletions bioimageio/core/utils/__init__.py
@@ -1,3 +1,4 @@
import json
import sys
from pathlib import Path

@@ -13,3 +14,8 @@ def files(package_name: str):

else:
from importlib.resources import files as files


with files("bioimageio.core").joinpath("VERSION").open("r", encoding="utf-8") as f:
VERSION = json.load(f)["version"]
assert isinstance(VERSION, str)
16 changes: 8 additions & 8 deletions bioimageio/core/utils/image_helper.py
@@ -126,10 +126,10 @@ def load_tensor(

def pad(
tensor: xr.DataArray,
pad_with: Mapping[AxisId, Union[int, Tuple[int, int]]],
pad_width: Mapping[AxisId, Union[int, Tuple[int, int]]],
mode: Literal["edge", "reflect", "symmetric"] = "symmetric",
):
return tensor.pad(pad_with=pad_with, mode=mode)
return tensor.pad(pad_width=pad_width, mode=mode)


def pad_to(
@@ -148,11 +148,11 @@ def pad_to(
else:
pad_axis_where = pad_where

pad_with: Dict[AxisId, Union[int, Tuple[int, int]]] = {}
pad_width: Dict[AxisId, Union[int, Tuple[int, int]]] = {}
for a, s_is in tensor.sizes.items():
a = AxisId(str(a))
if a not in sizes or sizes[a] == s_is:
pad_with[a] = 0
pad_width[a] = 0
elif s_is < sizes[a]:
raise ValueError(f"Cannot pad axis {a} of size {s_is} to smaller size {sizes[a]}")
elif a not in pad_axis_where:
@@ -161,15 +161,15 @@
pad_this_axis_where = pad_axis_where[a]
p = sizes[a] - s_is
if pad_this_axis_where == "before":
pad_with[a] = (p, 0)
pad_width[a] = (p, 0)
elif pad_this_axis_where == "after":
pad_with[a] = (0, p)
pad_width[a] = (0, p)
elif pad_this_axis_where == "center":
pad_with[a] = (left := p // 2, p - left)
pad_width[a] = (left := p // 2, p - left)
else:
assert_never(pad_this_axis_where)

return pad(tensor, pad_with, mode)
return pad(tensor, pad_width, mode)


def pad_old(image, axes: Sequence[str], padding, pad_right=True) -> Tuple[np.ndarray, Dict[str, slice]]:
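
The pad_with → pad_width rename also corrects the call into xarray, since pad_width (not pad_with) is the mapping DataArray.pad accepts. A minimal sketch of the xarray behaviour pad_to builds on; the dimension names and sizes here are made up for illustration:

import numpy as np
import xarray as xr

t = xr.DataArray(np.zeros((1, 8, 8)), dims=("c", "y", "x"))

# pad 2 rows before and 3 rows after along y, mirroring edge values ("symmetric" mode)
padded = t.pad(pad_width={"y": (2, 3)}, mode="symmetric")
assert dict(padded.sizes) == {"c": 1, "y": 13, "x": 8}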
9 changes: 9 additions & 0 deletions pyproject.toml
@@ -7,16 +7,25 @@ exclude = ["**/node_modules", "**/__pycache__", "tests/old_*"]
include = ["bioimageio", "scripts", "tests"]
pythonPlatform = "All"
pythonVersion = "3.8"
reportDuplicateImport = "error"
reportImplicitStringConcatenation = "warning"
reportIncompatibleMethodOverride = true
reportMatchNotExhaustive = "error"
reportMissingSuperCall = "error"
reportMissingTypeArgument = true
reportMissingTypeStubs = "warning"
reportPropertyTypeMismatch = "error"
reportUninitializedInstanceVariable = "error"
reportUnknownMemberType = false
reportUnnecessaryIsInstance = false
reportUnnecessaryTypeIgnoreComment = "error"
reportUnsupportedDunderAll = "error"
reportUnusedCallResult = "error"
reportUnusedClass = "error"
reportUnusedExpression = "error"
reportUnusedFunction = "error"
reportUnusedVariable = "error"
reportWildcardImportFromLibrary = "error"
typeCheckingMode = "strict"
useLibraryCodeForTypes = true

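
Most of the new pyright rules are self-explanatory; as one made-up illustration, reportUnusedCallResult = "error" flags call expressions whose return value is silently dropped:

values = [3, 1, 2]
sorted(values)       # flagged: the sorted copy is discarded and `values` is unchanged
_ = sorted(values)   # accepted: discarding the result is made explicit
ordered = sorted(values)  # or keep and use the result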
4 changes: 2 additions & 2 deletions tests/conftest.py
@@ -1,20 +1,19 @@
from __future__ import annotations

import logging
import os
import subprocess
import warnings
from types import MappingProxyType
from typing import List, Set

from loguru import logger
from pydantic import FilePath
from pytest import FixtureRequest, fixture

os.environ["BIOIMAGEIO_COUNT_RDF_DOWNLOADS"] = "false" # disable tracking before bioimageio imports
from bioimageio.spec import __version__ as bioimageio_spec_version
from bioimageio.spec._package import save_bioimageio_package

logger = logging.getLogger(__name__)
warnings.warn(f"testing with bioimageio.spec {bioimageio_spec_version}")

# test models for various frameworks
@@ -131,6 +130,7 @@
load_model_packages |= set(KERAS_TF2_MODELS)
load_model_packages |= set(TENSORFLOW2_MODELS)


@fixture(scope="session")
def model_packages() -> MappingProxyType[str, FilePath]:
return MappingProxyType({name: save_bioimageio_package(MODEL_SOURCES[name]) for name in load_model_packages})
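
conftest.py now relies on loguru's ready-made logger instead of logging.getLogger(__name__). A tiny, hypothetical usage sketch (not taken from the file):

from loguru import logger

# loguru ships a pre-configured logger object, so no per-module getLogger() call is needed
logger.warning("testing with bioimageio.spec {}", "0.5.0")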