diff --git a/customer-satisfaction/config.yaml b/customer-satisfaction/config.yaml
index fdbe4840..68154525 100644
--- a/customer-satisfaction/config.yaml
+++ b/customer-satisfaction/config.yaml
@@ -6,7 +6,7 @@ settings:
       - mlflow
 
 # configuration of the Model Control Plane
-model_version:
+model:
   name: Customer_Satisfaction_Predictor
   license: Apache 2.0
   description: Predictor of Customer Satisfaction.
diff --git a/customer-satisfaction/pipelines/deployment_pipeline.py b/customer-satisfaction/pipelines/deployment_pipeline.py
index 8c9f6712..bc1be4df 100644
--- a/customer-satisfaction/pipelines/deployment_pipeline.py
+++ b/customer-satisfaction/pipelines/deployment_pipeline.py
@@ -2,7 +2,7 @@
 
 from zenml.integrations.mlflow.steps import mlflow_model_deployer_step
 
-from zenml import pipeline, ModelVersion
+from zenml import pipeline
 
 from pipelines.training_pipeline import customer_satisfaction_training_pipeline
 from steps import predictor
diff --git a/customer-satisfaction/steps/evaluation.py b/customer-satisfaction/steps/evaluation.py
index 502aa5e8..27b37d25 100644
--- a/customer-satisfaction/steps/evaluation.py
+++ b/customer-satisfaction/steps/evaluation.py
@@ -45,7 +45,7 @@ def evaluation(
     mlflow.log_metric("rmse", rmse)
 
     # Also add the metrics to the Model within the ZenML Model Control Plane
-    artifact = get_step_context().model_version.get_artifact("sklearn_regressor")
+    artifact = get_step_context().model.get_artifact("sklearn_regressor")
 
     log_artifact_metadata(
         metadata={
diff --git a/customer-satisfaction/steps/model_loader.py b/customer-satisfaction/steps/model_loader.py
index 36f69a22..310587e9 100644
--- a/customer-satisfaction/steps/model_loader.py
+++ b/customer-satisfaction/steps/model_loader.py
@@ -1,5 +1,5 @@
 from sklearn.base import RegressorMixin
-from zenml import step, ModelVersion
+from zenml import step, Model
 from zenml.client import Client
 
 
@@ -12,9 +12,9 @@ def model_loader(
     Args:
         model_name: Name of the Model to load
     """
-    model_version = ModelVersion(
+    model = Model(
         name=model_name,
         version="production"
     )
-    model_artifact: RegressorMixin = model_version.load_artifact("sklearn_regressor")
+    model_artifact: RegressorMixin = model.load_artifact("sklearn_regressor")
     return model_artifact
\ No newline at end of file
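For reference, the renamed Model class reads the same at the call site as model_loader.py shows. A minimal standalone sketch, assuming a version of Customer_Satisfaction_Predictor has already been promoted to the production stage:

from sklearn.base import RegressorMixin
from zenml import Model

# Point at whichever model version currently holds the "production" stage
model = Model(name="Customer_Satisfaction_Predictor", version="production")

# Load the trained regressor artifact linked to that version
regressor: RegressorMixin = model.load_artifact("sklearn_regressor")
print(regressor)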
""" - # Get the current model_version produced by the current pipeline - model_version = get_step_context().model_version + # Get the current model produced by the current pipeline + zenml_model = get_step_context().model # Get the previous model version at the production stage - previous_production_model = ModelVersion( - name=model_version.name, + previous_production_model = Model( + name=zenml_model.name, version="production" ) try: # In case there already is a model version at the correct stage - previous_production_model_version_mse = float( + previous_production_model_mse = float( previous_production_model.get_artifact("sklearn_regressor").run_metadata["metrics"].value["mse"] ) except RuntimeError: # In case no model version has been promoted before, # default to a threshold value well above the new mse - previous_production_model_version_mse = mse + 1000 + previous_production_model_mse = mse + 1000 - if mse > previous_production_model_version_mse: + if mse > previous_production_model_mse: logger.info( f"Model mean-squared error {mse:.2f} is higher than" f" the mse of the previous production model " - f"{previous_production_model_version_mse:.2f} ! " + f"{previous_production_model_mse:.2f} ! " f"Not promoting model." ) is_promoted = False else: logger.info(f"Model promoted to {stage}!") is_promoted = True - model_version = get_step_context().model_version - model_version.set_stage(stage, force=True) + zenml_model = get_step_context().model + zenml_model.set_stage(stage, force=True) return is_promoted diff --git a/huggingface-sagemaker/run.ipynb b/huggingface-sagemaker/run.ipynb index eafab1fc..101517e6 100644 --- a/huggingface-sagemaker/run.ipynb +++ b/huggingface-sagemaker/run.ipynb @@ -5,7 +5,7 @@ "id": "51690802-31a7-4e6d-9f88-e6457c6c4a96", "metadata": {}, "source": [ - "# Huggingface Model to Sagemaker Endpoint: Automating MLOps with ZenML\r\n", + "# Huggingface Model to Sagemaker Endpoint: Automating MLOps with ZenML\n", "Deploying Huggingface models to AWS Sagemaker endpoints typically only requires a few lines of code. However, there's a growing demand to not just deploy, but to seamlessly automate the entire flow from training to production with comprehensive lineage tracking. ZenML adeptly fills this niche, providing an end-to-end MLOps solution for Huggingface users wishing to deploy to Sagemaker. Below, we’ll walk through the architecture that ZenML employs to bring a Huggingface model into production with AWS Sagemaker. 
diff --git a/huggingface-sagemaker/run.ipynb b/huggingface-sagemaker/run.ipynb
index eafab1fc..101517e6 100644
--- a/huggingface-sagemaker/run.ipynb
+++ b/huggingface-sagemaker/run.ipynb
@@ -5,7 +5,7 @@
    "id": "51690802-31a7-4e6d-9f88-e6457c6c4a96",
    "metadata": {},
    "source": [
-    "# Huggingface Model to Sagemaker Endpoint: Automating MLOps with ZenML\r\n",
+    "# Huggingface Model to Sagemaker Endpoint: Automating MLOps with ZenML\n",
     "Deploying Huggingface models to AWS Sagemaker endpoints typically only requires a few lines of code. However, there's a growing demand to not just deploy, but to seamlessly automate the entire flow from training to production with comprehensive lineage tracking. ZenML adeptly fills this niche, providing an end-to-end MLOps solution for Huggingface users wishing to deploy to Sagemaker. Below, we’ll walk through the architecture that ZenML employs to bring a Huggingface model into production with AWS Sagemaker. Of course all of this can be adapted to not just Sagemaker, but any other model deployment service like GCP Vertex or Azure ML Platform.\n",
     "\n",
     "This blog post showcases one way of using ZenML pipelines to achieve this:\n",
@@ -30,16 +30,13 @@
     "import numpy as np\n",
     "from datasets import DatasetDict, load_dataset\n",
     "from typing_extensions import Annotated\n",
-    "from zenml import step\n",
+    "from zenml import step, pipeline, Model\n",
     "from zenml.logger import get_logger\n",
     "\n",
     "import os\n",
     "from typing import Optional\n",
     "from datetime import datetime as dt\n",
     "\n",
-    "from zenml import pipeline\n",
-    "from zenml.model import ModelConfig\n",
-    "\n",
     "from steps import (\n",
     "    data_loader,\n",
     "    notify_on_failure,\n",
@@ -68,9 +65,9 @@
    "metadata": {},
    "source": [
     "# 🍳Breaking it down\n",
-    "\r\n",
-    "\r\n",
-    "\r\n"
+    "\n",
+    "\n",
+    "\n"
    ]
   },
  {
@@ -250,12 +247,10 @@
     "\n",
     "# This executes all steps in the pipeline in the correct order using the orchestrator\n",
     "# stack component that is configured in your active ZenML stack.\n",
-    "model_config = ModelConfig(\n",
+    "zenml_model = Model(\n",
     "    name=zenml_model_name,\n",
     "    license=\"Apache 2.0\",\n",
     "    description=\"Show case Model Control Plane.\",\n",
-    "    create_new_model_version=True,\n",
-    "    delete_new_version_on_failure=True,\n",
     "    tags=[\"sentiment_analysis\", \"huggingface\"],\n",
     ")\n",
     "\n",
@@ -265,7 +260,7 @@
     "    pipeline_args[\"enable_cache\"] = False\n",
     "\n",
     "# Execute Feature Engineering Pipeline\n",
-    "pipeline_args[\"model_config\"] = model_config\n",
+    "pipeline_args[\"model\"] = zenml_model\n",
     "pipeline_args[\"config_path\"] = os.path.join(\"configs\", \"feature_engineering_config.yaml\")\n",
     "run_args_feature = {\n",
     "    \"max_seq_length\": max_seq_length,\n",
@@ -299,7 +294,7 @@
    "id": "78ab8771-4421-4975-a3d5-12892a56b805",
    "metadata": {},
    "source": [
-    "## 💪 Step 2: Train the model with Huggingface Hub as the model registry\r\n",
+    "## 💪 Step 2: Train the model with Huggingface Hub as the model registry\n",
     " "
    ]
   },
@@ -342,7 +337,7 @@
     "# run_args_train[\"tokenizer_artifact_id\"] = latest_run.steps['tokenizer_loader'].output.id\n",
     "\n",
     "# Configure the model\n",
-    "pipeline_args[\"model_config\"] = model_config\n",
+    "pipeline_args[\"model\"] = zenml_model\n",
     "\n",
     "pipeline_args[\n",
     "    \"run_name\"\n",
@@ -400,7 +395,7 @@
    "id": "be79f454-a45d-4f5f-aa93-330d52069124",
    "metadata": {},
    "source": [
-    "## 🫅 Step 3: Promote the model to production\r\n"
+    "## 🫅 Step 3: Promote the model to production\n"
    ]
   },
  {
@@ -431,10 +426,10 @@
    "outputs": [],
    "source": [
     "run_args_promoting = {}\n",
-    "model_config = ModelConfig(name=zenml_model_name)\n",
+    "zenml_model = Model(name=zenml_model_name)\n",
     "pipeline_args[\"config_path\"] = os.path.join(\"configs\", \"promoting_config.yaml\")\n",
     "\n",
-    "pipeline_args[\"model_config\"] = model_config\n",
+    "pipeline_args[\"model\"] = zenml_model\n",
     "\n",
     "pipeline_args[\n",
     "    \"run_name\"\n",
@@ -458,7 +453,7 @@
    "id": "6efc4968-35fd-42e3-ba62-d8e1557aa0d6",
    "metadata": {},
    "source": [
-    "## 💯 Step 4: Deploy the model to AWS Sagemaker Endpoints\r\n"
+    "## 💯 Step 4: Deploy the model to AWS Sagemaker Endpoints\n"
    ]
   },
  {
+ "pipeline_args[\"model\"] = zenml_model\n", "pipeline_args[\"enable_cache\"] = False\n", "run_args_deploying = {}\n", "pipeline_args[\n", @@ -520,10 +515,10 @@ "id": "594ee4fc-f102-4b99-bdc3-2f1670c87679", "metadata": {}, "source": [ - "ZenML builds upon the straightforward deployment capability of Huggingface models to AWS Sagemaker, and transforms it into a sophisticated, repeatable, and transparent MLOps workflow. It takes charge of the intricate steps necessary for modern ML systems, ensuring that software engineering leads can focus on iteration and innovation rather than operational intricacies.\r\n", - "\r\n", - "To delve deeper into each stage, refer to the comprehensive guide on GitHub[: zenml-io/zenml-huggingface-sagemak](https://github.com/zenml-io/zenml-huggingface-sagemaker)er. Additionally[, this YouTube playli](https://www.youtube.com/watch?v=Q1EH2H8Akgo&list=PLhNrLW_IWplw6dBbmGcL828-atJMu3CwF)st provides a detailed visual walkthrough of the entire pipeline: Huggingface to Sagemaker ZenML tutorial.\r\n", - "\r\n", + "ZenML builds upon the straightforward deployment capability of Huggingface models to AWS Sagemaker, and transforms it into a sophisticated, repeatable, and transparent MLOps workflow. It takes charge of the intricate steps necessary for modern ML systems, ensuring that software engineering leads can focus on iteration and innovation rather than operational intricacies.\n", + "\n", + "To delve deeper into each stage, refer to the comprehensive guide on GitHub[: zenml-io/zenml-huggingface-sagemak](https://github.com/zenml-io/zenml-huggingface-sagemaker)er. Additionally[, this YouTube playli](https://www.youtube.com/watch?v=Q1EH2H8Akgo&list=PLhNrLW_IWplw6dBbmGcL828-atJMu3CwF)st provides a detailed visual walkthrough of the entire pipeline: Huggingface to Sagemaker ZenML tutorial.\n", + "\n", "Interested in standardizing your MLOps workflows? ZenML Cloud is now available to all - get a managed ZenML server with important features such as RBAC and pipeline trigge[rs. Book a ](https://zenml.io/book-a-demo)demo with us now to learn how you can create your own MLOps pipelines today." 
diff --git a/huggingface-sagemaker/run.py b/huggingface-sagemaker/run.py
index f9323c6c..8124d4ce 100644
--- a/huggingface-sagemaker/run.py
+++ b/huggingface-sagemaker/run.py
@@ -23,7 +23,7 @@
 from zenml.client import Client
 from zenml.enums import ModelStages
 from zenml.logger import get_logger
-from zenml.model.model_version import ModelVersion
+from zenml import Model
 
 from pipelines import (
     sentinment_analysis_deploy_pipeline,
@@ -200,12 +200,10 @@ def main(
         os.path.dirname(os.path.realpath(__file__)),
         "configs",
     )
-    model_version = ModelVersion(
+    zenml_model = Model(
         name=zenml_model_name,
         license="Apache 2.0",
         description="Show case Model Control Plane.",
-        create_new_model_version=True,
-        delete_new_version_on_failure=True,
         tags=["sentiment_analysis", "huggingface"],
     )
 
@@ -216,7 +214,7 @@ def main(
 
     # Execute Feature Engineering Pipeline
     if feature_pipeline:
-        pipeline_args["model_version"] = model_version
+        pipeline_args["model"] = zenml_model
         pipeline_args["config_path"] = os.path.join(config_folder, "feature_engineering_config.yaml")
         run_args_feature = {
             "max_seq_length": max_seq_length,
@@ -259,7 +257,7 @@ def main(
         run_args_train["dataset_artifact_id"] = tokenized_dataset_artifact.id
         run_args_train["tokenizer_artifact_id"] = tokenized_tokenizer_artifact.id
 
-        pipeline_args["model_version"] = model_version
+        pipeline_args["model"] = zenml_model
 
         pipeline_args[
             "run_name"
@@ -274,13 +272,13 @@ def main(
     if promoting_pipeline:
         run_args_promoting = {}
         # Promoting pipeline always check latest version
-        model_version = ModelVersion(
+        zenml_model = Model(
             name=zenml_model_name,
             version=ModelStages.LATEST,
         )
         pipeline_args["config_path"] = os.path.join(config_folder, "promoting_config.yaml")
 
-        pipeline_args["model_version"] = model_version
+        pipeline_args["model"] = zenml_model
 
         pipeline_args[
             "run_name"
@@ -294,11 +292,11 @@ def main(
         pipeline_args["config_path"] = os.path.join(config_folder, "deploying_config.yaml")
 
         # Deploying pipeline has new ZenML model config
-        model_version = ModelVersion(
+        zenml_model = Model(
             name=zenml_model_name,
             version=ModelStages.PRODUCTION,
         )
-        pipeline_args["model_version"] = model_version
+        pipeline_args["model"] = zenml_model
         pipeline_args["enable_cache"] = False
         run_args_deploying = {}
         pipeline_args[
diff --git a/huggingface-sagemaker/steps/deploying/sagemaker_deployment.py b/huggingface-sagemaker/steps/deploying/sagemaker_deployment.py
index 3c0b4663..797e3348 100644
--- a/huggingface-sagemaker/steps/deploying/sagemaker_deployment.py
+++ b/huggingface-sagemaker/steps/deploying/sagemaker_deployment.py
@@ -49,7 +49,7 @@ def deploy_hf_to_sagemaker(
     # Otherwise, use the provided values.
     if repo_id is None or revision is None:
         context = get_step_context()
-        mv = context.model_version
+        mv = context.model
         deployment_metadata = mv.get_data_artifact(name="huggingface_url").run_metadata
         repo_id = deployment_metadata["repo_id"].value
         revision = deployment_metadata["revision"].value
diff --git a/huggingface-sagemaker/steps/deploying/save_model.py b/huggingface-sagemaker/steps/deploying/save_model.py
index f570a27f..18a58eae 100644
--- a/huggingface-sagemaker/steps/deploying/save_model.py
+++ b/huggingface-sagemaker/steps/deploying/save_model.py
@@ -44,11 +44,11 @@ def save_model_to_deploy():
         f" Loading latest version of the model for stage {pipeline_extra['target_env']}..."
     )
     # Get the current model version
-    latest_version = get_step_context().model_version
+    current_zenml_model = get_step_context().model
 
     # Load model and tokenizer from Model Control Plane
-    model = latest_version.load_artifact(name="model")
-    tokenizer = latest_version.load_artifact(name="tokenizer")
+    model = current_zenml_model.load_artifact(name="model")
+    tokenizer = current_zenml_model.load_artifact(name="tokenizer")
     # Save the model and tokenizer locally
     model_path = "./gradio/"  # replace with the actual path
     tokenizer_path = "./gradio/"  # replace with the actual path
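Both deploying steps rely on the same renamed context accessor. A condensed sketch of the pattern; the step name is hypothetical, while the artifact and metadata keys are the ones used above:

from zenml import get_step_context, step

@step
def read_deployment_info() -> str:
    """Hypothetical step showing context.model replacing context.model_version."""
    mv = get_step_context().model

    # Metadata logged onto a data artifact of this model version
    deployment_metadata = mv.get_data_artifact(name="huggingface_url").run_metadata
    repo_id = deployment_metadata["repo_id"].value
    revision = deployment_metadata["revision"].value

    # Materialized artifacts can also be loaded back directly
    model = mv.load_artifact(name="model")
    tokenizer = mv.load_artifact(name="tokenizer")
    print(type(model), type(tokenizer))

    return f"{repo_id}@{revision}"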
diff --git a/huggingface-sagemaker/steps/promotion/promote_get_metrics.py b/huggingface-sagemaker/steps/promotion/promote_get_metrics.py
index 181aa221..f3135f02 100644
--- a/huggingface-sagemaker/steps/promotion/promote_get_metrics.py
+++ b/huggingface-sagemaker/steps/promotion/promote_get_metrics.py
@@ -25,9 +25,6 @@
 
 logger = get_logger(__name__)
 
-model_registry = Client().active_stack.model_registry
-
-
 @step
 def promote_get_metrics() -> (
     Tuple[
@@ -54,24 +51,23 @@ def promote_get_metrics() -> (
     zenml_client = Client()
 
     # Get current model version metric in current run
-    model_version = get_step_context().model_version
-    current_version = model_version._get_model_version()
-    current_metrics = current_version.get_model_artifact("model").run_metadata["metrics"].value
+    current_zenml_model = get_step_context().model
+    current_metrics = current_zenml_model.get_model_artifact("model").run_metadata["metrics"].value
     logger.info(f"Current model version metrics are {current_metrics}")
 
     # Get latest saved model version metric in target environment
     try:
-        latest_version = zenml_client.get_model_version(
-            model_name_or_id=model_version.name,
+        latest_zenml_model = zenml_client.get_model_version(
+            model_name_or_id=current_zenml_model.name,
             model_version_name_or_number_or_id=ModelStages(
                 pipeline_extra["target_env"]
             ),
         )
     except KeyError:
-        latest_version = None
-    if latest_version:
+        latest_zenml_model = None
+    if latest_zenml_model:
         latest_metrics = (
-            latest_version.get_model_artifact("model").run_metadata["metrics"].value
+            latest_zenml_model.get_model_artifact("model").run_metadata["metrics"].value
         )
         logger.info(f"Current model version metrics are {latest_metrics}")
     else:
diff --git a/huggingface-sagemaker/steps/promotion/promote_metric_compare_promoter.py b/huggingface-sagemaker/steps/promotion/promote_metric_compare_promoter.py
index 60135f7c..280ac0a8 100644
--- a/huggingface-sagemaker/steps/promotion/promote_metric_compare_promoter.py
+++ b/huggingface-sagemaker/steps/promotion/promote_metric_compare_promoter.py
@@ -78,9 +78,8 @@ def promote_metric_compare_promoter(
         should_promote = False
 
     if should_promote:
-        model_version = get_step_context().model_version
-        model_version = model_version._get_model_version()
-        model_version.set_stage(pipeline_extra["target_env"], force=True)
+        zenml_model = get_step_context().model
+        zenml_model.set_stage(pipeline_extra["target_env"], force=True)
 
         logger.info(
             f"Promoted current model version to {pipeline_extra['target_env']} environment"
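Outside a step context, the same lookup goes through the client. A sketch using the call signature from promote_get_metrics.py; the model name and the staging stage are assumptions:

from zenml.client import Client
from zenml.enums import ModelStages

client = Client()

# Fetch whichever version of the Model currently holds the "staging" stage
staged = client.get_model_version(
    model_name_or_id="sentiment_analysis",  # assumed model name
    model_version_name_or_number_or_id=ModelStages.STAGING,
)

# Read the metrics that were logged on its model artifact
metrics = staged.get_model_artifact("model").run_metadata["metrics"].value
print(metrics)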
diff --git a/stack-showcase/app.py b/stack-showcase/app.py
index 9fc1336d..cdd2d6aa 100644
--- a/stack-showcase/app.py
+++ b/stack-showcase/app.py
@@ -13,11 +13,11 @@
 os.system(f"zenml connect --url {ZENML_STORE_URL} --api-key {ZENML_STORE_API_KEY}")
 
 client = Client()
-zenml_model_version = client.get_model_version("breast_cancer_classifier", "production")
-preprocess_pipeline = zenml_model_version.get_artifact("preprocess_pipeline").load()
+zenml_model = client.get_model_version("breast_cancer_classifier", "production")
+preprocess_pipeline = zenml_model.get_artifact("preprocess_pipeline").load()
 
 # Load the model
-clf = zenml_model_version.get_artifact("model").load()
+clf = zenml_model.get_artifact("model").load()
 
 # Load dataset to get feature names
 data = load_breast_cancer()
diff --git a/stack-showcase/configs/deployment.yaml b/stack-showcase/configs/deployment.yaml
index 661b3286..3fa9165a 100644
--- a/stack-showcase/configs/deployment.yaml
+++ b/stack-showcase/configs/deployment.yaml
@@ -5,7 +5,7 @@ settings:
       - sklearn
 
 # configuration of the Model Control Plane
-model_version:
+model:
   name: breast_cancer_classifier
   version: production
   license: Apache 2.0
diff --git a/stack-showcase/configs/feature_engineering.yaml b/stack-showcase/configs/feature_engineering.yaml
index daa91a1e..c584be48 100644
--- a/stack-showcase/configs/feature_engineering.yaml
+++ b/stack-showcase/configs/feature_engineering.yaml
@@ -5,7 +5,7 @@ settings:
       - sklearn
 
 # configuration of the Model Control Plane
-model_version:
+model:
   name: breast_cancer_classifier
   license: Apache 2.0
   description: Classification of Breast Cancer Dataset.
diff --git a/stack-showcase/configs/inference.yaml b/stack-showcase/configs/inference.yaml
index 661b3286..3fa9165a 100644
--- a/stack-showcase/configs/inference.yaml
+++ b/stack-showcase/configs/inference.yaml
@@ -5,7 +5,7 @@ settings:
       - sklearn
 
 # configuration of the Model Control Plane
-model_version:
+model:
   name: breast_cancer_classifier
   version: production
   license: Apache 2.0
diff --git a/stack-showcase/configs/training.yaml b/stack-showcase/configs/training.yaml
index 99a31a0e..3f3c02c2 100644
--- a/stack-showcase/configs/training.yaml
+++ b/stack-showcase/configs/training.yaml
@@ -8,7 +8,7 @@ settings:
       - huggingface_hub
 
 # configuration of the Model Control Plane
-model_version:
+model:
   name: breast_cancer_classifier
   license: Apache 2.0
   description: Classification of Breast Cancer Dataset.
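All four configs change the same top-level key. After the rename, the Model Control Plane block takes roughly this shape; fields are merged from the files above, and the version line belongs only in deployment.yaml and inference.yaml:

# configuration of the Model Control Plane
model:  # renamed from model_version
  name: breast_cancer_classifier
  version: production  # only where the promoted version should be used
  license: Apache 2.0
  description: Classification of Breast Cancer Dataset.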
diff --git a/stack-showcase/run.ipynb b/stack-showcase/run.ipynb
index 69063cec..85a6c449 100644
--- a/stack-showcase/run.ipynb
+++ b/stack-showcase/run.ipynb
@@ -48,7 +48,7 @@
     "# Do the imports at the top\n",
     "\n",
     "import random\n",
-    "from zenml import ExternalArtifact, pipeline \n",
+    "from zenml import ExternalArtifact, pipeline, Model\n",
     "from zenml.client import Client\n",
     "from zenml.logger import get_logger\n",
     "from uuid import UUID\n",
@@ -314,12 +314,12 @@
     "    The predictions as pandas series\n",
     "    \"\"\"\n",
     "    ### ADD YOUR OWN CODE HERE - THIS IS JUST AN EXAMPLE ###\n",
-    "    model_version = get_step_context().model_version\n",
+    "    zenml_model = get_step_context().model\n",
     "\n",
-    "    print(model_version)\n",
+    "    print(zenml_model)\n",
     "\n",
     "    # run prediction from memory\n",
-    "    predictor = model_version.load_artifact(\"model\")\n",
+    "    predictor = zenml_model.load_artifact(\"model\")\n",
     "    predictions = predictor.predict(dataset_inf)\n",
     "\n",
     "    print(predictions)\n",
@@ -467,7 +467,7 @@
     "from datetime import datetime as dt\n",
     "\n",
     "from zenml import pipeline\n",
-    "from zenml.model import ModelConfig\n",
+    "from zenml import Model\n",
     "\n",
     "from steps import (\n",
     "    data_loader,\n",
@@ -679,12 +679,10 @@
     "\n",
     "# This executes all steps in the pipeline in the correct order using the orchestrator\n",
     "# stack component that is configured in your active ZenML stack.\n",
-    "model_config = ModelConfig(\n",
+    "zenml_model = Model(\n",
     "    name=zenml_model_name,\n",
     "    license=\"Apache 2.0\",\n",
     "    description=\"Show case Model Control Plane.\",\n",
-    "    create_new_model_version=True,\n",
-    "    delete_new_version_on_failure=True,\n",
     "    tags=[\"sentiment_analysis\", \"huggingface\"],\n",
     ")\n",
     "\n",
@@ -694,7 +692,7 @@
     "    pipeline_args[\"enable_cache\"] = False\n",
     "\n",
     "# Execute Feature Engineering Pipeline\n",
-    "pipeline_args[\"model_config\"] = model_config\n",
+    "pipeline_args[\"model\"] = zenml_model\n",
     "pipeline_args[\"config_path\"] = os.path.join(\"configs\", \"feature_engineering_config.yaml\")\n",
     "run_args_feature = {\n",
     "    \"max_seq_length\": max_seq_length,\n",
@@ -771,7 +769,7 @@
     "# run_args_train[\"tokenizer_artifact_id\"] = latest_run.steps['tokenizer_loader'].output.id\n",
     "\n",
     "# Configure the model\n",
-    "pipeline_args[\"model_config\"] = model_config\n",
+    "pipeline_args[\"model\"] = zenml_model\n",
     "\n",
     "pipeline_args[\n",
     "    \"run_name\"\n",
@@ -860,10 +858,10 @@
    "outputs": [],
    "source": [
     "run_args_promoting = {}\n",
-    "model_config = ModelConfig(name=zenml_model_name)\n",
+    "zenml_model = Model(name=zenml_model_name)\n",
     "pipeline_args[\"config_path\"] = os.path.join(\"configs\", \"promoting_config.yaml\")\n",
     "\n",
-    "pipeline_args[\"model_config\"] = model_config\n",
+    "pipeline_args[\"model\"] = zenml_model\n",
     "\n",
     "pipeline_args[\n",
     "    \"run_name\"\n",
@@ -920,11 +918,11 @@
     "pipeline_args[\"config_path\"] = os.path.join(\"configs\", \"deploying_config.yaml\")\n",
     "\n",
     "# Deploying pipeline has new ZenML model config\n",
-    "model_config = ModelConfig(\n",
+    "zenml_model = Model(\n",
     "    name=zenml_model_name,\n",
-    "    version=ModelStages.PRODUCTION,\n",
+    "    version=\"production\",\n",
     ")\n",
-    "pipeline_args[\"model_config\"] = model_config\n",
+    "pipeline_args[\"model\"] = zenml_model\n",
     "pipeline_args[\"enable_cache\"] = False\n",
     "run_args_deploying = {}\n",
     "pipeline_args[\n",
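One detail in the deploying cell above: version is now the plain string "production" rather than ModelStages.PRODUCTION, so the notebook no longer needs the enum import. Both spellings should resolve to the same stage:

from zenml import Model
from zenml.enums import ModelStages

# Two equivalent ways of pinning the production-stage version
m1 = Model(name="breast_cancer_classifier", version="production")
m2 = Model(name="breast_cancer_classifier", version=ModelStages.PRODUCTION)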
diff --git a/stack-showcase/run_stack_showcase.ipynb b/stack-showcase/run_stack_showcase.ipynb
index b1acfc55..92e6dd7e 100644
--- a/stack-showcase/run_stack_showcase.ipynb
+++ b/stack-showcase/run_stack_showcase.ipynb
@@ -33,7 +33,7 @@
     "from typing import Optional, List\n",
     "\n",
     "from zenml import pipeline\n",
-    "from zenml.model.model_version import ModelVersion\n",
+    "from zenml import Model\n",
     "\n",
     "from pipelines import feature_engineering\n",
     "\n",
@@ -153,7 +153,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "model_version = ModelVersion(\n",
+    "zenml_model = Model(\n",
     "    name=\"breast_cancer_classifier_model\",\n",
     "    description=\"Classification of Breast Cancer Dataset.\",\n",
     "    delete_new_version_on_failure=True,\n",
@@ -162,7 +162,7 @@
     "\n",
     "pipeline_args = {\n",
     "    \"enable_cache\": True, \n",
-    "    \"model_version\": model_version\n",
+    "    \"model\": zenml_model\n",
     "}\n",
     "\n",
     "# Model Version config\n",
@@ -269,7 +269,7 @@
     "\n",
     "pipeline_args = {\n",
     "    \"enable_cache\": True, \n",
-    "    \"model_version\": model_version,\n",
+    "    \"model\": zenml_model,\n",
     "    \"settings\": {\"docker\": docker_settings}\n",
     "}\n",
     "\n",
diff --git a/stack-showcase/steps/inference_predict.py b/stack-showcase/steps/inference_predict.py
index c29b3525..8d74cc80 100644
--- a/stack-showcase/steps/inference_predict.py
+++ b/stack-showcase/steps/inference_predict.py
@@ -47,10 +47,10 @@ def inference_predict(
         The predictions as pandas series
     """
     ### ADD YOUR OWN CODE HERE - THIS IS JUST AN EXAMPLE ###
-    model_version = get_step_context().model_version
+    zenml_model = get_step_context().model
 
     # run prediction from memory
-    predictor = model_version.load_artifact("model")
+    predictor = zenml_model.load_artifact("model")
     predictions = predictor.predict(dataset_inf)
 
     predictions = pd.Series(predictions, name="predicted")
diff --git a/stack-showcase/steps/model_promoter.py b/stack-showcase/steps/model_promoter.py
index 784e0cc5..04b9ce2a 100644
--- a/stack-showcase/steps/model_promoter.py
+++ b/stack-showcase/steps/model_promoter.py
@@ -35,8 +35,8 @@ def model_promoter(accuracy: float, stage: str = "production") -> bool:
     else:
         logger.info(f"Model promoted to {stage}!")
         is_promoted = True
-        model_version = get_step_context().model_version
-        model_version.set_stage(stage, force=True)
+        zenml_model = get_step_context().model
+        zenml_model.set_stage(stage, force=True)
 
     ### YOUR CODE ENDS HERE ###
     return is_promoted
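The inference-side pattern shared by inference_predict.py and the notebook cell, as a self-contained sketch mirroring the file above:

import pandas as pd
from typing_extensions import Annotated
from zenml import get_step_context, step

@step
def inference_predict(dataset_inf: pd.DataFrame) -> Annotated[pd.Series, "predictions"]:
    """Sketch of the renamed accessor inside an inference step."""
    # formerly: get_step_context().model_version
    zenml_model = get_step_context().model

    # Load the trained classifier from the Model Control Plane and predict
    predictor = zenml_model.load_artifact("model")
    predictions = predictor.predict(dataset_inf)
    return pd.Series(predictions, name="predicted")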