From bcef8ed4ec9b94eb98505421ddafe7f9adeb7d55 Mon Sep 17 00:00:00 2001
From: sailgpu
Date: Thu, 4 Jan 2024 11:41:27 +0000
Subject: [PATCH] linting change

---
 llm/kubeflow_inference_run.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/llm/kubeflow_inference_run.py b/llm/kubeflow_inference_run.py
index a30c6cb..7556e61 100644
--- a/llm/kubeflow_inference_run.py
+++ b/llm/kubeflow_inference_run.py
@@ -388,8 +388,10 @@ def execute(params: argparse.Namespace) -> None:
         model_info["repo_version"] = check_if_valid_version(model_info, mount_path)
 
     if quantize_bits and int(quantize_bits) not in [4, 8]:
-        print("## Quantization precision bits should be either 4 or 8."
-              " Default precision used is 16 (bfloat16)")
+        print(
+            "## Quantization precision bits should be either 4 or 8."
+            " Default precision used is 16 (bfloat16)"
+        )
         sys.exit(1)
     elif quantize_bits and deployment_resources["gpus"]:
         print("## BitsAndBytes Quantization requires GPUs")