From cf716688cfc96741ec6435c620e89c3014985c8b Mon Sep 17 00:00:00 2001
From: Boris Feld
Date: Tue, 9 Jul 2024 17:37:29 +0200
Subject: [PATCH] Try fixing the Tensorflow example

---
 .../metaflow/metaflow-model-evaluation/README.md           | 2 +-
 .../metaflow-model-evaluation/metaflow-model-evaluation.py | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/README.md b/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/README.md
index 0ea8e142..073904ce 100644
--- a/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/README.md
+++ b/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/README.md
@@ -32,7 +32,7 @@ export COMET_WORKSPACE=
 In this guide, we will demonstrate how to use Comet's Metaflow integration to build a simple model evaluation flow.
 
 ```shell
-python metaflow_model_evaluation.py run --max-workers 1 --n_samples 100
+python metaflow-model-evaluation.py run --max-workers 1 --n_samples 100
 ```
 
 Our flow consists of two steps.
diff --git a/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/metaflow-model-evaluation.py b/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/metaflow-model-evaluation.py
index 3b497e6a..5ef69339 100644
--- a/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/metaflow-model-evaluation.py
+++ b/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/metaflow-model-evaluation.py
@@ -215,8 +215,7 @@ def evaluate_classification_metrics(self):
         )
         accuracy = accuracy_score(labels, torch.argmax(predictions, dim=1))
 
-        self.comet_experiment.log_metrics(clf_metrics["micro avg"], prefix="micro_avg")
-        self.comet_experiment.log_metrics(clf_metrics["macro avg"], prefix="macro_avg")
+        self.comet_experiment.log_metrics(clf_metrics)
         self.comet_experiment.log_metrics({"accuracy": accuracy})
 
         log_model(self.comet_experiment, model, self.input)
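
For context, a minimal sketch (not part of the patch) of how a `clf_metrics` dict is typically produced with scikit-learn's `classification_report(..., output_dict=True)`. Such a dict contains `"macro avg"` and `"weighted avg"` keys, but `"micro avg"` only appears under specific conditions, which is presumably why indexing `clf_metrics["micro avg"]` could fail and why the patch logs the whole dict instead. The labels and predictions below are hypothetical.

```python
# Sketch (assumption): how clf_metrics is likely built upstream of the changed lines.
from sklearn.metrics import classification_report

labels = [0, 1, 1, 2, 0]  # hypothetical ground-truth labels
preds = [0, 1, 2, 2, 0]   # hypothetical model predictions

clf_metrics = classification_report(labels, preds, output_dict=True)

# Typical top-level keys: per-class entries ("0", "1", "2"), "accuracy",
# "macro avg", "weighted avg". "micro avg" is only included in certain cases
# (e.g. multilabel targets or an explicit labels= subset), so
# clf_metrics["micro avg"] can raise KeyError; logging the whole dict,
# as the patch does with self.comet_experiment.log_metrics(clf_metrics),
# avoids that lookup.
print(sorted(clf_metrics.keys()))
```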