diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 03c6824..8552640 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -57,5 +57,5 @@ jobs:
     with:
       stack-name: ${{ matrix.stack-name }}
       python-version: ${{ matrix.python-version }}
-      ref-zenml: ${{ inputs.ref-zenml || 'develop' }}
+      ref-zenml: ${{ inputs.ref-zenml || 'feature/followup-run-metadata' }}
       ref-template: ${{ inputs.ref-template || github.ref }}
diff --git a/README.md b/README.md
index 148d10c..72f505b 100644
--- a/README.md
+++ b/README.md
@@ -125,13 +125,13 @@ To create parallel processing of computationally expensive operations we use a l

 Code snippet 💻

 ```python
-from zenml import log_artifact_metadata
+from zenml import log_metadata

 score = accuracy_score(y_tst, y_pred)
 # log score along with output artifact as metadata
-log_artifact_metadata(
-    output_name="hp_result",
-    metric=float(score),
+log_metadata(
+    metadata={"metric": float(score)},
+    artifact_name="hp_result",
 )
 ```
diff --git a/template/steps/training/model_trainer.py b/template/steps/training/model_trainer.py
index 720ee65..7fda505 100644
--- a/template/steps/training/model_trainer.py
+++ b/template/steps/training/model_trainer.py
@@ -5,7 +5,7 @@ import mlflow
 import pandas as pd
 from sklearn.base import ClassifierMixin

-from zenml import ArtifactConfig, log_artifact_metadata, step, get_step_context
+from zenml import ArtifactConfig, step, get_step_context
 from zenml.client import Client
 from zenml.integrations.mlflow.experiment_trackers import MLFlowExperimentTracker
 from zenml.integrations.mlflow.steps.mlflow_registry import mlflow_register_model_step
diff --git a/template/steps/{% if hyperparameters_tuning %}hp_tuning{% endif %}/hp_tuning_single_search.py b/template/steps/{% if hyperparameters_tuning %}hp_tuning{% endif %}/hp_tuning_single_search.py
index 693ce75..0a5b022 100644
--- a/template/steps/{% if hyperparameters_tuning %}hp_tuning{% endif %}/hp_tuning_single_search.py
+++ b/template/steps/{% if hyperparameters_tuning %}hp_tuning{% endif %}/hp_tuning_single_search.py
@@ -9,7 +9,7 @@ from sklearn.metrics import accuracy_score
 from sklearn.model_selection import RandomizedSearchCV

 from utils import get_model_from_config
-from zenml import log_artifact_metadata, step
+from zenml import log_metadata, step
 from zenml.logger import get_logger

 logger = get_logger(__name__)
@@ -79,7 +79,7 @@ def hp_tuning_single_search(
     y_pred = cv.predict(X_tst)
     score = accuracy_score(y_tst, y_pred)
     # log score along with output artifact as metadata
-    log_artifact_metadata(
+    log_metadata(
         metadata={"metric": float(score)},
         artifact_name="hp_result",
     )
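
For reviewers unfamiliar with the renamed API, below is a minimal, self-contained sketch of the pattern this PR migrates to. The step name, signature, and types are illustrative and not part of the template; the `log_metadata` call itself mirrors the one in `hp_tuning_single_search.py`, where `artifact_name` must match the step's output artifact name so the metadata attaches to that output.

```python
from typing import Annotated

import pandas as pd
from sklearn.metrics import accuracy_score
from zenml import log_metadata, step


@step
def score_model(y_tst: pd.Series, y_pred: pd.Series) -> Annotated[float, "hp_result"]:
    """Illustrative step: compute accuracy and attach it as artifact metadata."""
    score = accuracy_score(y_tst, y_pred)
    # New API: metadata is passed as a dict, and artifact_name targets this
    # step's "hp_result" output (the old call was
    # log_artifact_metadata(output_name=..., metric=...)).
    log_metadata(
        metadata={"metric": float(score)},
        artifact_name="hp_result",
    )
    return float(score)
```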