diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 23f9193ea..4518c67e1 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -92,6 +92,7 @@ jobs:
           - huggingface
           - alibi-explain
           - alibi-detect
+          - catboost
         is-pr:
           - ${{ github.event_name == 'pull_request' }}
         exclude:
diff --git a/README.md b/README.md
index 53c328f2a..b5afb3f18 100644
--- a/README.md
+++ b/README.md
@@ -75,6 +75,7 @@ Out of the box, MLServer provides support for:
 | XGBoost | ✅ | [MLServer XGBoost](./runtimes/xgboost) |
 | Spark MLlib | ✅ | [MLServer MLlib](./runtimes/mllib) |
 | LightGBM | ✅ | [MLServer LightGBM](./runtimes/lightgbm) |
+| CatBoost | ✅ | [MLServer CatBoost](./runtimes/catboost) |
 | Tempo | ✅ | [`github.com/SeldonIO/tempo`](https://github.com/SeldonIO/tempo) |
 | MLflow | ✅ | [MLServer MLflow](./runtimes/mlflow) |
 | Alibi-Detect | ✅ | [MLServer Alibi Detect](./runtimes/alibi-detect) |
@@ -91,6 +92,7 @@ MLServer to start serving your machine learning models.
 - [Serving a `scikit-learn` model](./docs/examples/sklearn/README.md)
 - [Serving a `xgboost` model](./docs/examples/xgboost/README.md)
 - [Serving a `lightgbm` model](./docs/examples/lightgbm/README.md)
+- [Serving a `catboost` model](./docs/examples/catboost/README.md)
 - [Serving a `tempo` pipeline](./docs/examples/tempo/README.md)
 - [Serving a custom model](./docs/examples/custom/README.md)
 - [Serving an `alibi-detect` model](./docs/examples/alibi-detect/README.md)
diff --git a/docs/examples/catboost/README.ipynb b/docs/examples/catboost/README.ipynb
new file mode 100644
index 000000000..cd731f67f
--- /dev/null
+++ b/docs/examples/catboost/README.ipynb
@@ -0,0 +1,190 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Serving CatBoost models\n",
+    "\n",
+    "Out of the box, `mlserver` supports the deployment and serving of `catboost` models.\n",
+    "By default, it will assume that these models have been [serialised using the `save_model()` method](https://catboost.ai/en/docs/concepts/python-reference_catboost_save_model).\n",
+    "\n",
+    "In this example, we will cover how we can train and serialise a simple model, to then serve it using `mlserver`."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Training\n",
+    "\n",
+    "To test the CatBoost Server, first we need to generate a simple CatBoost model using Python."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "from catboost import CatBoostClassifier\n",
+    "\n",
+    "train_data = np.random.randint(0, 100, size=(100, 10))\n",
+    "train_labels = np.random.randint(0, 2, size=(100))\n",
+    "\n",
+    "model = CatBoostClassifier(iterations=2,\n",
+    "                           depth=2,\n",
+    "                           learning_rate=1,\n",
+    "                           loss_function='Logloss',\n",
+    "                           verbose=True)\n",
+    "model.fit(train_data, train_labels)\n",
+    "model.save_model('model.cbm')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Our model will be persisted as a file named `model.cbm`."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Serving\n",
+    "\n",
+    "Now that we have trained and saved our model, the next step will be to serve it using `mlserver`.\n",
+    "For that, we will need to create 2 configuration files:\n",
+    "\n",
+    "- `settings.json`: holds the configuration of our server (e.g. ports, log level, etc.).\n",
+    "- `model-settings.json`: holds the configuration of our model (e.g. input type, runtime to use, etc.)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### `settings.json`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile settings.json\n",
+    "{\n",
+    "    \"debug\": \"true\"\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### `model-settings.json`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile model-settings.json\n",
+    "{\n",
+    "    \"name\": \"catboost\",\n",
+    "    \"implementation\": \"mlserver_catboost.CatboostModel\",\n",
+    "    \"parameters\": {\n",
+    "        \"uri\": \"./model.cbm\",\n",
+    "        \"version\": \"v0.1.0\"\n",
+    "    }\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Start serving our model\n",
+    "\n",
+    "Now that we have our config in place, we can start the server by running `mlserver start .`. This command needs to be run either from the same directory as our config files, or with the path pointing to the folder that holds them.\n",
+    "\n",
+    "```shell\n",
+    "mlserver start .\n",
+    "```\n",
+    "\n",
+    "Since this command will start the server and block the terminal while it waits for requests, it will need to be run in the background or from a separate terminal."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Send test inference request\n",
+    "\n",
+    "We now have our model being served by `mlserver`.\n",
+    "To make sure that everything is working as expected, let's send a test inference request.\n",
+    "\n",
+    "For that, we can use the Python types that `mlserver` provides out of the box, or we can build our request manually."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "import requests\n",
+    "import numpy as np\n",
+    "\n",
+    "test_data = np.random.randint(0, 100, size=(1, 10))\n",
+    "\n",
+    "x_0 = test_data[0:1]\n",
+    "inference_request = {\n",
+    "    \"inputs\": [\n",
+    "        {\n",
+    "            \"name\": \"predict-prob\",\n",
+    "            \"shape\": x_0.shape,\n",
+    "            \"datatype\": \"FP32\",\n",
+    "            \"data\": x_0.tolist()\n",
+    "        }\n",
+    "    ]\n",
+    "}\n",
+    "\n",
+    "endpoint = \"http://localhost:8080/v2/models/catboost/versions/v0.1.0/infer\"\n",
+    "response = requests.post(endpoint, json=inference_request)\n",
+    "\n",
+    "print(response.json())"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.8"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
\ No newline at end of file
diff --git a/docs/examples/catboost/README.md b/docs/examples/catboost/README.md
new file mode 100644
index 000000000..c4a6c0768
--- /dev/null
+++ b/docs/examples/catboost/README.md
@@ -0,0 +1,104 @@
+# Serving CatBoost models
+
+Out of the box, `mlserver` supports the deployment and serving of `catboost` models.
+By default, it will assume that these models have been [serialised using the `save_model()` method](https://catboost.ai/en/docs/concepts/python-reference_catboost_save_model).
+
+In this example, we will cover how we can train and serialise a simple model, to then serve it using `mlserver`.
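+
+This example assumes that the `mlserver` package and the CatBoost runtime are
+already installed; if not, both can be installed from PyPI:
+
+```bash
+pip install mlserver mlserver-catboost
+```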
+
+## Training
+
+To test the CatBoost Server, first we need to generate a simple CatBoost model using Python.
+
+
+```python
+import numpy as np
+from catboost import CatBoostClassifier
+
+train_data = np.random.randint(0, 100, size=(100, 10))
+train_labels = np.random.randint(0, 2, size=(100))
+
+model = CatBoostClassifier(iterations=2,
+                           depth=2,
+                           learning_rate=1,
+                           loss_function='Logloss',
+                           verbose=True)
+model.fit(train_data, train_labels)
+model.save_model('model.cbm')
+```
+
+Our model will be persisted as a file named `model.cbm`.
+
+## Serving
+
+Now that we have trained and saved our model, the next step will be to serve it using `mlserver`.
+For that, we will need to create 2 configuration files:
+
+- `settings.json`: holds the configuration of our server (e.g. ports, log level, etc.).
+- `model-settings.json`: holds the configuration of our model (e.g. input type, runtime to use, etc.).
+
+### `settings.json`
+
+
+```python
+%%writefile settings.json
+{
+    "debug": "true"
+}
+```
+
+### `model-settings.json`
+
+
+```python
+%%writefile model-settings.json
+{
+    "name": "catboost",
+    "implementation": "mlserver_catboost.CatboostModel",
+    "parameters": {
+        "uri": "./model.cbm",
+        "version": "v0.1.0"
+    }
+}
+```
+
+### Start serving our model
+
+Now that we have our config in place, we can start the server by running `mlserver start .`. This command needs to be run either from the same directory as our config files, or with the path pointing to the folder that holds them.
+
+```shell
+mlserver start .
+```
+
+Since this command will start the server and block the terminal while it waits for requests, it will need to be run in the background or from a separate terminal.
+
+### Send test inference request
+
+We now have our model being served by `mlserver`.
+To make sure that everything is working as expected, let's send a test inference request.
+
+For that, we can use the Python types that `mlserver` provides out of the box, or we can build our request manually.
+
+
+```python
+import requests
+import numpy as np
+
+test_data = np.random.randint(0, 100, size=(1, 10))
+
+x_0 = test_data[0:1]
+inference_request = {
+    "inputs": [
+        {
+            "name": "predict-prob",
+            "shape": x_0.shape,
+            "datatype": "FP32",
+            "data": x_0.tolist()
+        }
+    ]
+}
+
+endpoint = "http://localhost:8080/v2/models/catboost/versions/v0.1.0/infer"
+response = requests.post(endpoint, json=inference_request)
+
+print(response.json())
+```
diff --git a/docs/examples/catboost/model-settings.json b/docs/examples/catboost/model-settings.json
new file mode 100644
index 000000000..7967b0c63
--- /dev/null
+++ b/docs/examples/catboost/model-settings.json
@@ -0,0 +1,8 @@
+{
+    "name": "catboost",
+    "implementation": "mlserver_catboost.CatboostModel",
+    "parameters": {
+        "uri": "./model.cbm",
+        "version": "v0.1.0"
+    }
+}
\ No newline at end of file
diff --git a/docs/examples/catboost/model.cbm b/docs/examples/catboost/model.cbm
new file mode 100644
index 000000000..874dac119
Binary files /dev/null and b/docs/examples/catboost/model.cbm differ
diff --git a/docs/examples/catboost/settings.json b/docs/examples/catboost/settings.json
new file mode 100644
index 000000000..f2ee7ab8d
--- /dev/null
+++ b/docs/examples/catboost/settings.json
@@ -0,0 +1,3 @@
+{
+    "debug": "true"
+}
diff --git a/docs/examples/index.md b/docs/examples/index.md
index 02df03684..ea4d75000 100644
--- a/docs/examples/index.md
+++ b/docs/examples/index.md
@@ -16,6 +16,7 @@ models](./custom/README.md)).
 - [Serving Scikit-Learn models](./sklearn/README.md)
 - [Serving XGBoost models](./xgboost/README.md)
 - [Serving LightGBM models](./lightgbm/README.md)
+- [Serving CatBoost models](./catboost/README.md)
 - [Serving Tempo pipelines](./tempo/README.md)
 - [Serving MLflow models](./mlflow/README.md)
 - [Serving custom models](./custom/README.md)
diff --git a/docs/runtimes/catboost.md b/docs/runtimes/catboost.md
new file mode 100644
index 000000000..5c7c78e22
--- /dev/null
+++ b/docs/runtimes/catboost.md
@@ -0,0 +1,3 @@
+```{include} ../../runtimes/catboost/README.md
+:relative-docs: ../../docs/
+```
diff --git a/docs/runtimes/index.md b/docs/runtimes/index.md
index 4259c4324..fd3524d3b 100644
--- a/docs/runtimes/index.md
+++ b/docs/runtimes/index.md
@@ -22,17 +22,16 @@ class in your `model-settings.json` file.
 
 ## Included Inference Runtimes
 
-| Framework | Package Name | Implementation Class | Example | Documentation |
-| ------------- | ------------------------ | -------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------------- |
-| Scikit-Learn | `mlserver-sklearn` | `mlserver_sklearn.SKLearnModel` | [Scikit-Learn example](../examples/sklearn/README.md) | [MLServer SKLearn](./sklearn) |
-| XGBoost | `mlserver-xgboost` | `mlserver_xgboost.XGBoostModel` | [XGBoost example](../examples/xgboost/README.md) | [MLServer XGBoost](./xgboost) |
-| HuggingFace | `mlserver-huggingface` | `mlserver_huggingface.HuggingFaceRuntime` | [HuggingFace example](../examples/huggingface/README.md) | [MLServer HuggingFace](./huggingface) |
-| Spark MLlib | `mlserver-mllib` | `mlserver_mllib.MLlibModel` | Coming Soon | [MLServer MLlib](./mllib) |
-| LightGBM | `mlserver-lightgbm` | `mlserver_lightgbm.LightGBMModel` | [LightGBM example](../examples/lightgbm/README.md) | [MLServer LightGBM](./lightgbm) |
-| Tempo | `tempo` | `tempo.mlserver.InferenceRuntime` | [Tempo example](../examples/tempo/README.md) | [`github.com/SeldonIO/tempo`](https://github.com/SeldonIO/tempo) |
-| MLflow | `mlserver-mlflow` | `mlserver_mlflow.MLflowRuntime` | [MLflow example](../examples/mlflow/README.md) | [MLServer MLflow](./mlflow) |
-| Alibi-Detect | `mlserver-alibi-detect` | `mlserver_alibi_detect.AlibiDetectRuntime` | [Alibi-detect example](../examples/alibi-detect/README.md) | [MLServer Alibi-Detect](./alibi-detect) |
-| Alibi-Explain | `mlserver-alibi-explain` | `mlserver_alibi_explain.AlibiExplainRuntime` | Coming Soon | [MLServer Alibi-Explain](./alibi-explain) |
+| Framework | Package Name | Implementation Class | Example | Documentation |
+| ------------ | ----------------------- | ------------------------------------------ | ---------------------------------------------------------- | ---------------------------------------------------------------- |
+| Scikit-Learn | `mlserver-sklearn` | `mlserver_sklearn.SKLearnModel` | [Scikit-Learn example](../examples/sklearn/README.md) | [MLServer SKLearn](./sklearn) |
+| XGBoost | `mlserver-xgboost` | `mlserver_xgboost.XGBoostModel` | [XGBoost example](../examples/xgboost/README.md) | [MLServer XGBoost](./xgboost) |
+| Spark MLlib | `mlserver-mllib` | `mlserver_mllib.MLlibModel` | Coming Soon | [MLServer MLlib](./mllib) |
+| LightGBM | `mlserver-lightgbm` | `mlserver_lightgbm.LightGBMModel` | [LightGBM example](../examples/lightgbm/README.md) | [MLServer LightGBM](./lightgbm) |
+| CatBoost | `mlserver-catboost` | `mlserver_catboost.CatboostModel` | [CatBoost example](../examples/catboost/README.md) | [MLServer CatBoost](./catboost) |
+| Tempo | `tempo` | `tempo.mlserver.InferenceRuntime` | [Tempo example](../examples/tempo/README.md) | [`github.com/SeldonIO/tempo`](https://github.com/SeldonIO/tempo) |
+| MLflow | `mlserver-mlflow` | `mlserver_mlflow.MLflowRuntime` | [MLflow example](../examples/mlflow/README.md) | [MLServer MLflow](./mlflow) |
+| Alibi-Detect | `mlserver-alibi-detect` | `mlserver_alibi_detect.AlibiDetectRuntime` | [Alibi-detect example](../examples/alibi-detect/README.md) | [MLServer Alibi-Detect](./alibi-detect) |
 
 ```{toctree}
 :hidden:
@@ -44,6 +43,7 @@ MLflow <./mlflow>
 Tempo
 Spark MLlib <./mllib>
 LightGBM <./lightgbm>
+CatBoost <./catboost>
 Alibi-Detect <./alibi-detect>
 Alibi-Explain <./alibi-explain>
 HuggingFace <./huggingface>
diff --git a/runtimes/catboost/LICENSE b/runtimes/catboost/LICENSE
new file mode 100644
index 000000000..b84e13561
--- /dev/null
+++ b/runtimes/catboost/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2020 Seldon Technologies Ltd.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/runtimes/catboost/README.md b/runtimes/catboost/README.md
new file mode 100644
index 000000000..3dfd52b51
--- /dev/null
+++ b/runtimes/catboost/README.md
@@ -0,0 +1,23 @@
+# CatBoost runtime for MLServer
+
+This package provides an MLServer runtime compatible with CatBoost's `CatBoostClassifier`.
+
+## Usage
+
+You can install the runtime, alongside `mlserver`, as:
+
+```bash
+pip install mlserver mlserver-catboost
+```
+
+For further information on how to use MLServer with CatBoost, you can check out
+this [worked-out example](../../docs/examples/catboost/README.md).
+
+## Content Types
+
+If no [content type](../../docs/user-guide/content-type) is present on the
+request or metadata, the CatBoost runtime will try to decode the payload as
+a [NumPy Array](../../docs/user-guide/content-type).
+To avoid this, either send a different content type explicitly, or define the
+correct one as part of your [model's
+metadata](../../docs/reference/model-settings).
diff --git a/runtimes/catboost/mlserver_catboost/__init__.py b/runtimes/catboost/mlserver_catboost/__init__.py
new file mode 100644
index 000000000..39fa6ab0a
--- /dev/null
+++ b/runtimes/catboost/mlserver_catboost/__init__.py
@@ -0,0 +1,3 @@
+from .catboost import CatboostModel
+
+__all__ = ["CatboostModel"]
diff --git a/runtimes/catboost/mlserver_catboost/catboost.py b/runtimes/catboost/mlserver_catboost/catboost.py
new file mode 100644
index 000000000..c8cb2e856
--- /dev/null
+++ b/runtimes/catboost/mlserver_catboost/catboost.py
@@ -0,0 +1,35 @@
+from catboost import CatBoostClassifier
+
+from mlserver import types
+from mlserver.model import MLModel
+from mlserver.utils import get_model_uri
+from mlserver.codecs import NumpyCodec, NumpyRequestCodec
+
+
+WELLKNOWN_MODEL_FILENAMES = ["model.cbm", "model.bin"]
+
+
+class CatboostModel(MLModel):
+    """
+    Implementation of the MLModel interface to load and serve `catboost` models.
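+
+    The model is loaded with `catboost.CatBoostClassifier.load_model()`,
+    looking for one of the well-known filenames (`model.cbm` or `model.bin`)
+    under the configured URI, and request payloads are decoded into NumPy
+    arrays via the default NumPy codecs.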
+    """
+
+    async def load(self) -> bool:
+        model_uri = await get_model_uri(
+            self._settings, wellknown_filenames=WELLKNOWN_MODEL_FILENAMES
+        )
+
+        self._model = CatBoostClassifier()
+        self._model.load_model(model_uri)
+        self.ready = True
+        return self.ready
+
+    async def predict(self, payload: types.InferenceRequest) -> types.InferenceResponse:
+        decoded = self.decode_request(payload, default_codec=NumpyRequestCodec)
+        prediction = self._model.predict(decoded)
+
+        return types.InferenceResponse(
+            model_name=self.name,
+            model_version=self.version,
+            outputs=[NumpyCodec.encode(name="predict", payload=prediction)],
+        )
diff --git a/runtimes/catboost/mlserver_catboost/version.py b/runtimes/catboost/mlserver_catboost/version.py
new file mode 100644
index 000000000..84568d209
--- /dev/null
+++ b/runtimes/catboost/mlserver_catboost/version.py
@@ -0,0 +1 @@
+__version__ = "1.4.0.dev3"
diff --git a/runtimes/catboost/pyproject.toml b/runtimes/catboost/pyproject.toml
new file mode 100644
index 000000000..bc83e8981
--- /dev/null
+++ b/runtimes/catboost/pyproject.toml
@@ -0,0 +1,22 @@
+[tool.poetry]
+name = "mlserver-catboost"
+version = "1.4.0.dev3"
+description = "Catboost runtime for MLServer"
+authors = ["Theofilos Papapanagiotou "]
+maintainers = ["Seldon Technologies Ltd. "]
+license = "Apache-2.0"
+readme = "README.md"
+packages = [{include = "mlserver_catboost"}]
+
+[tool.poetry.dependencies]
+python = "^3.8.1,<3.12"
+catboost = "*"
+joblib = "*"
+mlserver = "*"
+
+[tool.poetry.group.dev.dependencies]
+mlserver = {path = "../..", develop = true}
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
diff --git a/runtimes/catboost/tests/conftest.py b/runtimes/catboost/tests/conftest.py
new file mode 100644
index 000000000..7fba251cc
--- /dev/null
+++ b/runtimes/catboost/tests/conftest.py
@@ -0,0 +1,58 @@
+import pytest
+import os
+import numpy as np
+from catboost import CatBoostClassifier  # , CatBoostRegressor, CatBoostRanker
+
+from mlserver.settings import ModelSettings, ModelParameters
+from mlserver.types import InferenceRequest
+
+from mlserver_catboost import CatboostModel
+
+TESTS_PATH = os.path.dirname(__file__)
+TESTDATA_PATH = os.path.join(TESTS_PATH, "testdata")
+
+
+@pytest.fixture
+def model_uri(tmp_path) -> str:
+    train_data = np.random.randint(0, 100, size=(100, 10))
+    train_labels = np.random.randint(0, 2, size=(100))
+
+    model = CatBoostClassifier(
+        iterations=2, depth=2, learning_rate=1, loss_function="Logloss", verbose=True
+    )
+    # TODO: add a selector for the regressor/ranker classes
+    # model = CatBoostRegressor(
+    #     iterations=2, depth=2, learning_rate=1, loss_function="RMSE", verbose=True
+    # )
+    # model = CatBoostRanker(
+    #     iterations=2, depth=2, learning_rate=1, loss_function="RMSE", verbose=True
+    # )
+    model.fit(train_data, train_labels)
+
+    model_uri = os.path.join(tmp_path, "model.cbm")
+    model.save_model(model_uri)
+
+    return model_uri
+
+
+@pytest.fixture
+def model_settings(model_uri: str) -> ModelSettings:
+    return ModelSettings(
+        name="catboost-model",
+        implementation=CatboostModel,
+        parameters=ModelParameters(uri=model_uri, version="v1.2.3"),
+    )
+
+
+@pytest.fixture
+async def model(model_settings: ModelSettings) -> CatboostModel:
+    model = CatboostModel(model_settings)
+    await model.load()
+
+    return model
+
+
+@pytest.fixture
+def inference_request() -> InferenceRequest:
+    payload_path = os.path.join(TESTDATA_PATH, "inference-request.json")
+    return InferenceRequest.parse_file(payload_path)
diff --git a/runtimes/catboost/tests/test_catboost.py b/runtimes/catboost/tests/test_catboost.py
new file mode 100644
index 000000000..0817db456
--- /dev/null
+++ b/runtimes/catboost/tests/test_catboost.py
@@ -0,0 +1,48 @@
+import pytest
+import os
+from catboost import CatBoostClassifier
+
+from mlserver.settings import ModelSettings
+from mlserver.codecs import CodecError
+from mlserver.types import RequestInput, InferenceRequest
+
+from mlserver_catboost import CatboostModel
+from mlserver_catboost.catboost import WELLKNOWN_MODEL_FILENAMES
+
+
+def test_load(model: CatboostModel):
+    assert model.ready
+    assert type(model._model) == CatBoostClassifier
+
+
+@pytest.mark.parametrize("fname", WELLKNOWN_MODEL_FILENAMES)
+async def test_load_folder(fname, model_uri: str, model_settings: ModelSettings):
+    model_folder = os.path.dirname(model_uri)
+    model_path = os.path.join(model_folder, fname)
+    os.rename(model_uri, model_path)
+
+    model_settings.parameters.uri = model_path  # type: ignore
+
+    model = CatboostModel(model_settings)
+    await model.load()
+
+    assert model.ready
+    assert type(model._model) == CatBoostClassifier
+
+
+async def test_predict(model: CatboostModel, inference_request: InferenceRequest):
+    response = await model.predict(inference_request)
+
+    assert len(response.outputs) == 1
+    assert 0 <= response.outputs[0].data[0] <= 1
+
+
+async def test_multiple_inputs_error(
+    model: CatboostModel, inference_request: InferenceRequest
+):
+    inference_request.inputs.append(
+        RequestInput(name="input-1", shape=[1, 2], data=[[0, 1]], datatype="FP32")
+    )
+
+    with pytest.raises(CodecError):
+        await model.predict(inference_request)
diff --git a/runtimes/catboost/tests/testdata/inference-request.json b/runtimes/catboost/tests/testdata/inference-request.json
new file mode 100644
index 000000000..7bc1688b8
--- /dev/null
+++ b/runtimes/catboost/tests/testdata/inference-request.json
@@ -0,0 +1,11 @@
+{
+  "inputs": [
+    {
+      "name": "input-0",
+      "shape": [10],
+      "datatype": "INT32",
+      "data": [[56, 2, 85, 72, 4, 87, 26, 50, 32, 19]]
+    }
+  ],
+  "outputs": [{ "name": "predict" }]
+}
diff --git a/runtimes/catboost/tox.ini b/runtimes/catboost/tox.ini
new file mode 100644
index 000000000..21f3266d4
--- /dev/null
+++ b/runtimes/catboost/tox.ini
@@ -0,0 +1,11 @@
+[tox]
+isolated_build = true
+
+[testenv]
+allowlist_externals = poetry
+commands_pre =
+    poetry install --sync --no-root
+    poetry install -C {toxinidir}/../../
+commands =
+    python -m pytest {posargs} \
+        {toxinidir}/tests
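
As a usage reference, the new runtime can also be exercised directly from Python, mirroring what `conftest.py` and the tests above do. A minimal sketch (it assumes `mlserver` and `mlserver-catboost` are installed, and that a `model.cbm` has been trained and saved as in the example docs):

```python
import asyncio

import numpy as np

from mlserver.settings import ModelSettings, ModelParameters
from mlserver.types import InferenceRequest, RequestInput

from mlserver_catboost import CatboostModel


async def main():
    # Configure the runtime programmatically; this mirrors model-settings.json
    settings = ModelSettings(
        name="catboost",
        implementation=CatboostModel,
        parameters=ModelParameters(uri="./model.cbm", version="v0.1.0"),
    )

    # Instantiate and load the model, as the `model` fixture does
    model = CatboostModel(settings)
    await model.load()

    # Build a V2 inference request for a single row of 10 features
    x = np.random.randint(0, 100, size=(1, 10))
    request = InferenceRequest(
        inputs=[
            RequestInput(
                name="input-0",
                shape=list(x.shape),
                datatype="INT32",
                data=x.tolist(),
            )
        ]
    )

    response = await model.predict(request)
    print(response.outputs[0].data)


asyncio.run(main())
```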