From 0c909eaf216e7f610310d4f7cac9ed85ac64aafa Mon Sep 17 00:00:00 2001
From: braf
Date: Tue, 10 Oct 2023 20:59:10 +0000
Subject: [PATCH] Removing deepcopy in an attempt to fix CodeQL errors

---
 .../generate/perf_analyzer_config_generator.py | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/model_analyzer/config/generate/perf_analyzer_config_generator.py b/model_analyzer/config/generate/perf_analyzer_config_generator.py
index 876be5c91..2ae96862d 100755
--- a/model_analyzer/config/generate/perf_analyzer_config_generator.py
+++ b/model_analyzer/config/generate/perf_analyzer_config_generator.py
@@ -16,7 +16,6 @@
 
 import json
 import logging
-from copy import deepcopy
 from typing import Dict, Generator, List, Optional, Tuple
 
 from model_analyzer.config.input.config_command_profile import ConfigCommandProfile
@@ -192,10 +191,7 @@ def set_last_results(
 
     def _set_perf_analyzer_flags(self, model_perf_analyzer_flags: dict) -> dict:
         # For LLM models we will be creating custom input data based on prompt length
-        perf_analyzer_flags = deepcopy(model_perf_analyzer_flags)
-        # perf_analyzer_flags = {
-        #     key: value for key, value in model_perf_analyzer_flags.items()
-        # }
+        perf_analyzer_flags = {k: v for k, v in model_perf_analyzer_flags.items()}
 
         if self._cli_config.is_llm_model():
             perf_analyzer_flags.pop("input-data")
@@ -212,9 +208,9 @@ def _create_input_dict(self, model_perf_analyzer_flags: dict) -> dict:
         return {}
 
     def _modify_prompt_in_input_dict(self, prompt_length: int) -> Dict:
-        modified_input_dict = deepcopy(self._llm_input_dict)
-
         modified_prompt = ["hi"] * prompt_length
+
+        modified_input_dict = {k: v for k, v in self._llm_input_dict.items()}
         modified_input_dict["data"][0]["PROMPT"] = modified_prompt
 
         return modified_input_dict
@@ -322,10 +318,12 @@ def _create_base_perf_config(self) -> PerfAnalyzerConfig:
     def _extract_prompt_length(
         self, unmodified_parameter_combination: Dict
     ) -> Tuple[int, Dict]:
+        modified_parameter_combination = {
+            k: v for k, v in unmodified_parameter_combination.items()
+        }
+
         if self._cli_config.is_llm_model():
-            modified_parameter_combination = deepcopy(unmodified_parameter_combination)
             prompt_length = modified_parameter_combination.pop("prompt-length")
-
             return prompt_length, modified_parameter_combination
         else:
             return 0, unmodified_parameter_combination
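
Note: a dict comprehension such as {k: v for k, v in d.items()} (like d.copy())
makes a shallow copy, not a drop-in replacement for deepcopy: the top-level dict
is new, but nested lists and dicts are still shared with the original. A minimal
standalone sketch of the difference; llm_input_dict here is a hypothetical
stand-in shaped like the "data"/"PROMPT" structure above, not code from this
patch:

    from copy import deepcopy

    # Hypothetical input-data structure, mirroring the shape used above.
    llm_input_dict = {"data": [{"PROMPT": ["hi"]}]}

    # Shallow copy: the new dict's "data" key still refers to the same
    # inner list and dict objects as llm_input_dict.
    shallow = {k: v for k, v in llm_input_dict.items()}
    shallow["data"][0]["PROMPT"] = ["hi"] * 4

    # The mutation is visible through the original dict.
    assert llm_input_dict["data"][0]["PROMPT"] == ["hi"] * 4

    # Deep copy: nested objects are duplicated, so the mutation stays local.
    original = {"data": [{"PROMPT": ["hi"]}]}
    deep = deepcopy(original)
    deep["data"][0]["PROMPT"] = ["hi"] * 4
    assert original["data"][0]["PROMPT"] == ["hi"]

Whether that aliasing matters for _modify_prompt_in_input_dict depends on
whether self._llm_input_dict is reused after the call; if it must remain
unmodified, a deep copy (or rebuilding the nested structure) is still needed.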