diff --git a/README.md b/README.md
index 50e1700..55542f9 100644
--- a/README.md
+++ b/README.md
@@ -308,7 +308,7 @@ You can follow [Data Validators docs](https://docs.zenml.io/stacks-and-component
 
 As a last step concluding all work done so far, we will calculate predictions on the inference dataset and persist them in [Artifact Store](https://docs.zenml.io/stacks-and-components/component-guide/artifact-stores) attached to the current inference model version of the Model Control Plane for reuse and observability.
 
-We will leverage a prepared predictions service called `mlflow_deployment` linked to the inference model version of the Model Control Plane to run `.predict()` and to put predictions as an output of the predictions step, so it is automatically stored in the [Artifact Store](https://docs.zenml.io/stacks-and-components/component-guide/artifact-stores) and linked to the Model Control Plane model version as a versioned artifact link with zero effort. This is achieved because we additionally annotated the `predictions` output with `ArtifactConfig(overwrite=False)`. This is required to deliver a comprehensive history to stakeholders since Batch Inference can be executed using the same Model Control Plane version multiple times.
+We will leverage a prepared predictions service called `mlflow_deployment` linked to the inference model version of the Model Control Plane to run `.predict()` and to put predictions as an output of the predictions step, so it is automatically stored in the [Artifact Store](https://docs.zenml.io/stacks-and-components/component-guide/artifact-stores) and linked to the Model Control Plane model version as a versioned artifact link with zero effort. This is achieved because we additionally annotated the `predictions` output with `DataArtifactConfig(overwrite=False)`. This is required to deliver a comprehensive history to stakeholders since Batch Inference can be executed using the same Model Control Plane version multiple times.
 
 ```
 NOTE: On non-local orchestrators a `model` artifact will be loaded into memory to run predictions directly. You can adapt this part to your needs.
@@ -318,12 +318,12 @@ NOTE: On non-local orchestrators a `model` artifact will be loaded into memory t
 Code snippet 💻
 
 ```python
-from zenml.model import ArtifactConfig
+from zenml.model import DataArtifactConfig
 
 @step
 def inference_predict(
     dataset_inf: pd.DataFrame,
-) -> Annotated[pd.Series, "predictions", ArtifactConfig(overwrite=False)]:
+) -> Annotated[pd.Series, "predictions", DataArtifactConfig(overwrite=False)]:
     model_version = get_step_context().model_version
 
     # get predictor
diff --git a/template/steps/deployment/deployment_deploy.py b/template/steps/deployment/deployment_deploy.py
index edfb7d0..c66c002 100644
--- a/template/steps/deployment/deployment_deploy.py
+++ b/template/steps/deployment/deployment_deploy.py
@@ -11,7 +11,7 @@
     mlflow_model_registry_deployer_step,
 )
 from zenml.logger import get_logger
-from zenml.model import DeploymentArtifactConfig
+from zenml.model import EndpointArtifactConfig
 
 logger = get_logger(__name__)
 
@@ -21,7 +21,7 @@ def deployment_deploy() -> (
     Annotated[
         Optional[MLFlowDeploymentService],
         "mlflow_deployment",
-        DeploymentArtifactConfig(),
+        EndpointArtifactConfig(),
     ]
 ):
     """Predictions step.
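For context, the rename maps `ArtifactConfig` to the more specific `DataArtifactConfig` for data outputs, and `DeploymentArtifactConfig` to `EndpointArtifactConfig` for the deployment service output. Below is a minimal sketch of how the updated `inference_predict` step reads end to end, assembled from the hunks above. Everything past the truncated `# get predictor` comment is illustrative: the `get_endpoint_artifact` accessor and the exact shape of the `.predict()` call are assumptions, not code from this diff.

```python
from typing_extensions import Annotated

import pandas as pd
from zenml import get_step_context, step
from zenml.integrations.mlflow.services.mlflow_deployment import (
    MLFlowDeploymentService,
)
from zenml.model import DataArtifactConfig


@step
def inference_predict(
    dataset_inf: pd.DataFrame,
) -> Annotated[pd.Series, "predictions", DataArtifactConfig(overwrite=False)]:
    model_version = get_step_context().model_version

    # get predictor: the `mlflow_deployment` service linked by
    # deployment_deploy() above (accessor name is an assumption here,
    # chosen to pair with EndpointArtifactConfig)
    deployment_service: MLFlowDeploymentService = (
        model_version.get_endpoint_artifact("mlflow_deployment").load()
    )

    # With overwrite=False, each batch-inference run links a new version
    # of the `predictions` artifact to the same model version.
    predictions = deployment_service.predict(request=dataset_inf)
    return pd.Series(predictions, name="predictions")
```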
diff --git a/template/steps/etl/inference_data_preprocessor.py b/template/steps/etl/inference_data_preprocessor.py
index 9a20f61..649b3ec 100644
--- a/template/steps/etl/inference_data_preprocessor.py
+++ b/template/steps/etl/inference_data_preprocessor.py
@@ -6,7 +6,7 @@
 import pandas as pd
 from sklearn.pipeline import Pipeline
 from zenml import step
-from zenml.model import ArtifactConfig
+from zenml.model import DataArtifactConfig
 
 
 @step
@@ -17,7 +17,7 @@ def inference_data_preprocessor(
 ) -> Annotated[
     pd.DataFrame,
     "dataset_inf",
-    ArtifactConfig(overwrite=False, artifact_name="inference_dataset"),
+    DataArtifactConfig(overwrite=False, artifact_name="inference_dataset"),
 ]:
     """Data preprocessor step.
 
diff --git a/template/steps/inference/inference_predict.py b/template/steps/inference/inference_predict.py
index 190da25..2eeb729 100644
--- a/template/steps/inference/inference_predict.py
+++ b/template/steps/inference/inference_predict.py
@@ -8,7 +8,7 @@
 from zenml import get_step_context, step
 from zenml.integrations.mlflow.services.mlflow_deployment import MLFlowDeploymentService
 from zenml.logger import get_logger
-from zenml.model import ArtifactConfig
+from zenml.model import DataArtifactConfig
 
 logger = get_logger(__name__)
 
@@ -16,7 +16,7 @@
 @step
 def inference_predict(
     dataset_inf: pd.DataFrame,
-) -> Annotated[pd.Series, "predictions", ArtifactConfig(overwrite=False)]:
+) -> Annotated[pd.Series, "predictions", DataArtifactConfig(overwrite=False)]:
     """Predictions step.
 
     This is an example of a predictions step that takes the data in and returns
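The same rename applies in the ETL step, where the annotation also shows what `artifact_name` buys you: the output is linked in the Model Control Plane under `inference_dataset` rather than under the step output name `dataset_inf`. A short sketch of the step after this diff; only the annotation comes from the hunk above, while the `preprocess_pipeline` input and the body are assumptions for illustration.

```python
from typing_extensions import Annotated

import pandas as pd
from sklearn.pipeline import Pipeline
from zenml import step
from zenml.model import DataArtifactConfig


@step
def inference_data_preprocessor(
    dataset_inf: pd.DataFrame,
    preprocess_pipeline: Pipeline,
) -> Annotated[
    pd.DataFrame,
    "dataset_inf",
    # Link under "inference_dataset" instead of the step output name, and
    # keep every run's version instead of overwriting the previous link.
    DataArtifactConfig(overwrite=False, artifact_name="inference_dataset"),
]:
    # Apply the training-time preprocessing so train and inference match.
    # (Illustrative body; column labels are dropped in this sketch.)
    return pd.DataFrame(preprocess_pipeline.transform(dataset_inf))
```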