From ece1d4651fb56e05591bc9c01834500e1eaaac9b Mon Sep 17 00:00:00 2001
From: Baris Can Durak
Date: Thu, 28 Nov 2024 15:04:07 +0100
Subject: [PATCH] fixing the call

---
 template/steps/prepare_datasets.py | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/template/steps/prepare_datasets.py b/template/steps/prepare_datasets.py
index b9e78a8..9313b83 100644
--- a/template/steps/prepare_datasets.py
+++ b/template/steps/prepare_datasets.py
@@ -6,7 +6,7 @@
 from materializers.directory_materializer import DirectoryMaterializer
 from typing_extensions import Annotated
 from utils.tokenizer import generate_and_tokenize_prompt, load_tokenizer
-from zenml import get_step_context, log_metadata, step
+from zenml import log_metadata, step
 from zenml.materializers import BuiltInMaterializer
 from zenml.utils.cuda_utils import cleanup_gpu_memory
 
@@ -33,16 +33,13 @@ def prepare_data(
     cleanup_gpu_memory(force=True)
 
-    context = get_step_context()
-    if context.model:
-        log_metadata(
-            metadata={
-                "system_prompt": system_prompt,
-                "base_model_id": base_model_id,
-            },
-            model_name=context.model.name,
-            model_version=context.model.version,
-        )
+    log_metadata(
+        metadata={
+            "system_prompt": system_prompt,
+            "base_model_id": base_model_id,
+        },
+        infer_model=True,
+    )
 
     tokenizer = load_tokenizer(base_model_id, False, use_fast)
 
     gen_and_tokenize = partial(