Speed up test pipeline (#831)
* Convert test pipeline into matrix across tox envs

* Reorder tox envs

* If each worker runs a different tox env, there is no need to maximise build space

* Rename base env to mlserver

* Only install tox in test pipeline

* Fix relative path

* Revert back to single dev.txt reqs

* Remove unneeded extra reqs

* Reorder dev deps installation to make pip's dependency resolution easier

* Fix HF tests

* Add types-requests to main dev deps

* Fix all-runtime tests

* Convert all tests folders into Python packages and change Pytest's importmode

* Change test imports to relative

* Use python -m pytest to call pytest

* Format
Adrian Gonzalez-Martin authored Nov 9, 2022
1 parent 9d95eb7 commit 4377f21
Showing 20 changed files with 86 additions and 74 deletions.
21 changes: 12 additions & 9 deletions .github/workflows/tests.yml
@@ -29,22 +29,25 @@ jobs:
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10"]
tox-environment:
- mlserver
- sklearn
- xgboost
- lightgbm
- mlflow
- huggingface
- alibi-explain
- alibi-detect
- all-runtimes
steps:
- name: Maximize build space
uses: easimon/maximize-build-space@master
with:
remove-dotnet: "true"
remove-haskell: "true"
remove-android: "true"
root-reserve-mb: "20480"
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
make install-dev
pip install -r requirements/dev.txt
- name: Test
run: |
make test
tox -e ${{ matrix.tox-environment }}
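
The step above hands each matrix entry to tox, but the tox environments themselves are defined outside this diff. Below is a minimal, hypothetical tox.ini sketch of how environments such as mlserver and sklearn could be laid out; the env names come from the matrix, while the paths and contents are assumptions, not part of this commit.

# Hypothetical sketch -- the real tox.ini is not shown in this diff.
[tox]
envlist = mlserver, sklearn, xgboost, lightgbm, mlflow, huggingface, alibi-explain, alibi-detect, all-runtimes

[testenv:mlserver]
deps =
    # shared dev/test requirements (assumed path)
    -r{toxinidir}/requirements/dev.txt
commands =
    python -m pytest {toxinidir}/tests {posargs}

[testenv:sklearn]
deps =
    -r{toxinidir}/requirements/dev.txt
    # editable install of just this runtime (assumed path)
    -e{toxinidir}/runtimes/sklearn
commands =
    python -m pytest {toxinidir}/runtimes/sklearn/tests {posargs}

With a layout along these lines, the workflow's tox -e ${{ matrix.tox-environment }} step installs only what one runtime needs per worker, which is why the Maximize build space step could be dropped.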
1 change: 1 addition & 0 deletions requirements/dev.txt
@@ -32,6 +32,7 @@ mypy-protobuf==3.1.0
types-protobuf==3.20.4.1
types-orjson==3.6.2
types-aiofiles==22.1.0
types-requests==2.28.11.2
black==22.10.0

# Pushing to PyPi
Empty file.
2 changes: 0 additions & 2 deletions runtimes/alibi-explain/requirements/dev.txt

This file was deleted.

2 changes: 1 addition & 1 deletion runtimes/alibi-explain/setup.py
@@ -38,7 +38,7 @@ def _load_description() -> str:
"alibi[shap, tensorflow]",
"orjson",
# numba 0.55.0 requires: numpy <1.22 (for Shap / alibi)
"numpy<1.22",
"numpy<=1.22",
],
long_description=_load_description(),
long_description_content_type="text/markdown",
Empty file.
6 changes: 3 additions & 3 deletions runtimes/alibi-explain/tests/conftest.py
@@ -27,8 +27,8 @@
from mlserver_alibi_explain.common import AlibiExplainSettings
from mlserver_alibi_explain.runtime import AlibiExplainRuntime, AlibiExplainRuntimeBase

from helpers.tf_model import get_tf_mnist_model_uri
from helpers.run_async import run_async_as_sync
from .helpers.tf_model import get_tf_mnist_model_uri, TFMNISTModel
from .helpers.run_async import run_async_as_sync

TESTS_PATH = Path(os.path.dirname(__file__))
_ANCHOR_IMAGE_DIR = TESTS_PATH / ".data" / "mnist_anchor_image"
@@ -55,7 +55,7 @@ def event_loop():
def custom_runtime_tf_settings() -> ModelSettings:
return ModelSettings(
name="custom_tf_mnist_model",
implementation="helpers.tf_model.TFMNISTModel",
implementation=TFMNISTModel,
)


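The relative imports introduced above, together with the empty files added throughout the diff (presumably __init__.py markers that turn each tests folder into a package), match the commit-message item about changing Pytest's import mode, presumably to importlib. A minimal assumed snippet of that setting in an INI-style pytest config follows; the actual file and value are not among the hunks shown here.

# Hypothetical pytest.ini (or setup.cfg [tool:pytest]) snippet -- assumed, not from this diff.
[pytest]
addopts = --import-mode=importlib

Under importlib mode, a test module that lives in a package is imported under its full package name, so from .helpers.tf_model import ... resolves within each package and identically named test files in different runtimes no longer clash on sys.path.
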
2 changes: 1 addition & 1 deletion runtimes/alibi-explain/tests/test_alibi_runtime_base.py
@@ -28,7 +28,7 @@
from mlserver_alibi_explain.runtime import AlibiExplainRuntime, AlibiExplainRuntimeBase
from mlserver_alibi_explain.errors import InvalidExplanationShape

from helpers.run_async import run_async_as_sync
from .helpers.run_async import run_async_as_sync

"""
Smoke tests for runtimes
3 changes: 2 additions & 1 deletion runtimes/alibi-explain/tests/test_black_box.py
@@ -9,7 +9,6 @@
from alibi.saving import load_explainer
from numpy.testing import assert_allclose

from helpers.tf_model import get_tf_mnist_model_uri
from mlserver import MLModel
from mlserver.codecs import NumpyCodec, StringCodec
from mlserver.types import (
@@ -28,6 +27,8 @@
_DEFAULT_INPUT_NAME,
)

from .helpers.tf_model import get_tf_mnist_model_uri

TESTS_PATH = Path(os.path.dirname(__file__))
_DEFAULT_ID_NAME = "dummy_id"

3 changes: 2 additions & 1 deletion runtimes/alibi-explain/tests/test_integrated_gradients.py
@@ -7,11 +7,12 @@
from alibi.explainers import IntegratedGradients
from numpy.testing import assert_array_almost_equal

from helpers.tf_model import get_tf_mnist_model_uri
from mlserver.codecs import NumpyCodec
from mlserver.types import InferenceRequest, Parameters, RequestInput
from mlserver_alibi_explain.common import convert_from_bytes

from .helpers.tf_model import get_tf_mnist_model_uri


@pytest.fixture()
def payload() -> InferenceRequest:
3 changes: 3 additions & 0 deletions runtimes/huggingface/setup.py
@@ -36,6 +36,9 @@ def _load_description() -> str:
install_requires=[
"mlserver",
"optimum[onnxruntime]>=1.2.3",
# Pin `transformers`, otherwise we risk falling into this issue:
# https://github.com/huggingface/optimum/issues/344
"transformers<=4.21.1",
],
long_description=_load_description(),
long_description_content_type="text/markdown",
Empty file.
30 changes: 18 additions & 12 deletions runtimes/huggingface/tests/test_codecs.py
@@ -17,10 +17,13 @@
InferenceRequest(
inputs=[
RequestInput(
name="foo", datatype="BYTES", data=["bar1", "bar2"], shape=[2]
name="foo",
datatype="BYTES",
data=["bar1", "bar2"],
shape=[2, 1],
),
RequestInput(
name="foo2", datatype="BYTES", data=["var1"], shape=[1]
name="foo2", datatype="BYTES", data=["var1"], shape=[1, 1]
),
]
),
@@ -46,7 +49,7 @@ def test_decode_request(inference_request, expected):
name="foo",
datatype="BYTES",
data=[b"bar1", b"bar2"],
shape=[2],
shape=[2, 1],
parameters=Parameters(
content_type=MultiStringRequestCodec.InputCodec.ContentType
),
@@ -55,7 +58,7 @@ def test_decode_request(inference_request, expected):
name="foo2",
datatype="BYTES",
data=[b"var1"],
shape=[1],
shape=[1, 1],
parameters=Parameters(
content_type=MultiStringRequestCodec.InputCodec.ContentType
),
@@ -73,7 +76,7 @@ def test_decode_request(inference_request, expected):
name="foo",
datatype="BYTES",
data=["bar1", "bar2"],
shape=[2],
shape=[2, 1],
parameters=Parameters(
content_type=MultiStringRequestCodec.InputCodec.ContentType
),
@@ -82,7 +85,7 @@ def test_decode_request(inference_request, expected):
name="foo2",
datatype="BYTES",
data=["var1"],
shape=[1],
shape=[1, 1],
parameters=Parameters(
content_type=MultiStringRequestCodec.InputCodec.ContentType
),
@@ -108,10 +111,13 @@ def test_encode_request(payload, use_bytes, expected):
model_name="my-model",
outputs=[
ResponseOutput(
name="foo", datatype="BYTES", data=["bar1", "bar2"], shape=[2]
name="foo",
datatype="BYTES",
data=["bar1", "bar2"],
shape=[2, 1],
),
ResponseOutput(
name="foo2", datatype="BYTES", data=["var1"], shape=[1]
name="foo2", datatype="BYTES", data=["var1"], shape=[1, 1]
),
],
),
@@ -138,7 +144,7 @@ def test_decode_response(inference_response, expected):
name="foo",
datatype="BYTES",
data=[b"bar1", b"bar2"],
shape=[2],
shape=[2, 1],
parameters=Parameters(
content_type=MultiStringRequestCodec.InputCodec.ContentType
),
@@ -147,7 +153,7 @@ def test_decode_response(inference_response, expected):
name="foo2",
datatype="BYTES",
data=[b"var1"],
shape=[1],
shape=[1, 1],
parameters=Parameters(
content_type=MultiStringRequestCodec.InputCodec.ContentType
),
@@ -165,7 +171,7 @@ def test_decode_response(inference_response, expected):
name="foo",
datatype="BYTES",
data=["bar1", "bar2"],
shape=[2],
shape=[2, 1],
parameters=Parameters(
content_type=MultiStringRequestCodec.InputCodec.ContentType
),
@@ -174,7 +180,7 @@ def test_decode_response(inference_response, expected):
name="foo2",
datatype="BYTES",
data=["var1"],
shape=[1],
shape=[1, 1],
parameters=Parameters(
content_type=MultiStringRequestCodec.InputCodec.ContentType
),
Empty file.
2 changes: 0 additions & 2 deletions runtimes/mlflow/requirements/dev.txt
@@ -1,11 +1,9 @@
scikit-learn
torch==1.13.0
pytorch-lightning==1.8.0.post1
# Pin torchmetrics to a version older than 0.5.0 to avoid this issue:
# https://github.com/PyTorchLightning/pytorch-lightning/issues/10233
torchmetrics==0.10.2
torchvision==0.14.0
httpx==0.23.0

# Force local tests to use MLflow 2.x
mlflow >= 2.0.0rc0
Empty file.
2 changes: 1 addition & 1 deletion runtimes/mlflow/tests/conftest.py
@@ -15,7 +15,7 @@

from mlserver_mlflow import MLflowRuntime

from torch_fixtures import MNISTDataModule, LightningMNISTClassifier
from .torch_fixtures import MNISTDataModule, LightningMNISTClassifier

TESTS_PATH = os.path.dirname(__file__)
TESTDATA_PATH = os.path.join(TESTS_PATH, "testdata")
Empty file.
Empty file.