diff --git a/model_analyzer/config/generate/optuna_run_config_generator.py b/model_analyzer/config/generate/optuna_run_config_generator.py
index c1c090033..9353be2f6 100755
--- a/model_analyzer/config/generate/optuna_run_config_generator.py
+++ b/model_analyzer/config/generate/optuna_run_config_generator.py
@@ -141,14 +141,7 @@ def get_configs(self) -> Generator[RunConfig, None, None]:
         yield default_run_config
         self._default_measurement = self._last_measurement
 
-        total_num_of_possible_configs = (
-            self._search_parameters.number_of_total_possible_configurations()
-        )
-        max_configs_to_search = int(
-            total_num_of_possible_configs
-            * self._config.max_percentage_of_search_space
-            / 100
-        )
+        max_configs_to_search = self._determine_maximum_number_of_configs_to_search()
 
         # TODO: TMA-1885: Need an early exit strategy
         for _ in range(max_configs_to_search):
@@ -159,6 +152,57 @@ def get_configs(self) -> Generator[RunConfig, None, None]:
             score = self._calculate_score()
             self._study.tell(trial, score)
 
+    def _determine_maximum_number_of_configs_to_search(self) -> int:
+        max_trials_based_on_percentage_of_search_space = (
+            self._determine_trials_based_on_max_percentage_of_search_space()
+        )
+
+        max_configs_to_search = self._decide_between_percentage_and_trial_count(
+            max_trials_based_on_percentage_of_search_space
+        )
+
+        return max_configs_to_search
+
+    def _determine_trials_based_on_max_percentage_of_search_space(self) -> int:
+        total_num_of_possible_configs = (
+            self._search_parameters.number_of_total_possible_configurations()
+        )
+        max_trials_based_on_percentage_of_search_space = int(
+            total_num_of_possible_configs
+            * self._config.max_percentage_of_search_space
+            / 100
+        )
+
+        return max_trials_based_on_percentage_of_search_space
+
+    def _decide_between_percentage_and_trial_count(
+        self, max_trials_based_on_percentage_of_search_space: int
+    ) -> int:
+        # By default we will search based on percentage of search space
+        # If the user specifies a number of trials we will use that instead
+        # If both are specified we will use the smaller number
+        max_trials_set_by_user = self._config.get_config()[
+            "optuna_max_trials"
+        ].is_set_by_user()
+        max_percentage_set_by_user = self._config.get_config()[
+            "max_percentage_of_search_space"
+        ].is_set_by_user()
+
+        if max_trials_set_by_user and max_percentage_set_by_user:
+            if (
+                self._config.optuna_max_trials
+                < max_trials_based_on_percentage_of_search_space
+            ):
+                max_configs_to_search = self._config.optuna_max_trials
+            else:
+                max_configs_to_search = max_trials_based_on_percentage_of_search_space
+        elif max_trials_set_by_user:
+            max_configs_to_search = self._config.optuna_max_trials
+        else:
+            max_configs_to_search = max_trials_based_on_percentage_of_search_space
+
+        return max_configs_to_search
+
     def _create_trial_objectives(self, trial: optuna.Trial) -> TrialObjectives:
         trial_objectives: TrialObjectives = {}
         for parameter_name in OptunaRunConfigGenerator.optuna_parameter_list:
diff --git a/model_analyzer/config/input/config_command_profile.py b/model_analyzer/config/input/config_command_profile.py
index 98b5c09c8..0fc7bb5d1 100755
--- a/model_analyzer/config/input/config_command_profile.py
+++ b/model_analyzer/config/input/config_command_profile.py
@@ -63,7 +63,9 @@
     DEFAULT_ONLINE_OBJECTIVES,
     DEFAULT_ONLINE_PLOTS,
     DEFAULT_OPTUNA_MAX_PERCENTAGE_OF_SEARCH_SPACE,
+    DEFAULT_OPTUNA_MAX_TRIALS,
     DEFAULT_OPTUNA_MIN_PERCENTAGE_OF_SEARCH_SPACE,
+    DEFAULT_OPTUNA_MIN_TRIALS,
     DEFAULT_OUTPUT_MODEL_REPOSITORY,
     DEFAULT_OVERRIDE_OUTPUT_REPOSITORY_FLAG,
     DEFAULT_PERF_ANALYZER_CPU_UTIL,
@@ -936,6 +938,24 @@ def _add_run_search_configs(self):
                 description="Maximum percentage of the search space to profile when using Optuna",
             )
         )
+        self._add_config(
+            ConfigField(
+                "optuna_min_trials",
+                flags=["--optuna_min_trials"],
+                field_type=ConfigPrimitive(int),
+                default_value=DEFAULT_OPTUNA_MIN_TRIALS,
+                description="Minimum number of trials to profile when using Optuna",
+            )
+        )
+        self._add_config(
+            ConfigField(
+                "optuna_max_trials",
+                flags=["--optuna_max_trials"],
+                field_type=ConfigPrimitive(int),
+                default_value=DEFAULT_OPTUNA_MAX_TRIALS,
+                description="Maximum number of trials to profile when using Optuna",
+            )
+        )
         self._add_config(
             ConfigField(
                 "run_config_search_mode",
diff --git a/model_analyzer/config/input/config_defaults.py b/model_analyzer/config/input/config_defaults.py
index 86a462a91..813ed7f7b 100755
--- a/model_analyzer/config/input/config_defaults.py
+++ b/model_analyzer/config/input/config_defaults.py
@@ -56,6 +56,8 @@
 DEFAULT_RUN_CONFIG_PROFILE_MODELS_CONCURRENTLY_ENABLE = False
 DEFAULT_OPTUNA_MIN_PERCENTAGE_OF_SEARCH_SPACE = 5
 DEFAULT_OPTUNA_MAX_PERCENTAGE_OF_SEARCH_SPACE = 10
+DEFAULT_OPTUNA_MIN_TRIALS = 20
+DEFAULT_OPTUNA_MAX_TRIALS = 200
 DEFAULT_REQUEST_RATE_SEARCH_ENABLE = False
 DEFAULT_TRITON_LAUNCH_MODE = "local"
 DEFAULT_TRITON_DOCKER_IMAGE = "nvcr.io/nvidia/tritonserver:24.05-py3"
diff --git a/tests/test_cli.py b/tests/test_cli.py
index b4a1651cf..3a6785273 100755
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -87,6 +87,8 @@ def get_test_options():
         OptionStruct("int", "profile", "--run-config-search-max-binary-search-steps", None, "10", "5"),
         OptionStruct("int", "profile", "--min_percentage_of_search_space", None, "10", "5"),
         OptionStruct("int", "profile", "--max_percentage_of_search_space", None, "5", "10"),
+        OptionStruct("int", "profile", "--optuna_min_trials", None, "10", "20"),
+        OptionStruct("int", "profile", "--optuna_max_trials", None, "5", "200"),
         OptionStruct("float", "profile", "--monitoring-interval", "-i", "10.0", "1.0"),
         OptionStruct("float", "profile", "--perf-analyzer-cpu-util", None, "10.0", str(psutil.cpu_count() * 80.0)),
         OptionStruct("int", "profile", "--num-configs-per-model", None, "10", "3"),
diff --git a/tests/test_optuna_run_config_generator.py b/tests/test_optuna_run_config_generator.py
index 34670f3a2..cfe5ea3a4 100755
--- a/tests/test_optuna_run_config_generator.py
+++ b/tests/test_optuna_run_config_generator.py
@@ -72,6 +72,54 @@ def setUp(self):
             seed=100,
         )
 
+    def test_max_number_of_configs_to_search_percentage(self):
+        """
+        Test percentage based max num of configs to search
+        """
+        max_configs_to_search = (
+            self._rcg._determine_maximum_number_of_configs_to_search()
+        )
+
+        # Batch sizes (8) * Instance groups (5) * queue delays (3) = 120
+        # 10% of search space (120) = 12
+        self.assertEquals(max_configs_to_search, 12)
+
+    def test_max_number_of_configs_to_search_count(self):
+        """
+        Test count based max num of configs to search
+        """
+        config = self._create_config(additional_args=["--optuna_max_trials", "6"])
+
+        self._rcg._config = config
+
+        max_configs_to_search = (
+            self._rcg._determine_maximum_number_of_configs_to_search()
+        )
+
+        self.assertEquals(max_configs_to_search, 6)
+
+    def test_max_number_of_configs_to_search_both(self):
+        """
+        Test count based on specifying both a count and percentage
+        """
+        config = self._create_config(
+            additional_args=[
+                "--optuna_max_trials",
+                "6",
+                "--max_percentage_of_search_space",
+                "3",
+            ]
+        )
+
+        self._rcg._config = config
+
+        max_configs_to_search = (
+            self._rcg._determine_maximum_number_of_configs_to_search()
+        )
+
+        # Since both are specified we will use the smaller of the two (3% of 120 = 3)
+        self.assertEquals(max_configs_to_search, 3)
+
     def test_create_default_run_config(self):
         """
         Test that a default run config is properly created
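For review context, a minimal, self-contained sketch of the cap-selection rule that the new _decide_between_percentage_and_trial_count method implements, checked against the three new unit tests. The helper name and its plain optional arguments are illustrative only and not part of Model Analyzer's API; they stand in for the config-object plumbing (is_set_by_user(), optuna_max_trials, max_percentage_of_search_space). The flag names and the default of 10% come from the diff.

# Illustrative sketch (not part of the diff): the cap-selection rule restated with
# plain optional arguments in place of Model Analyzer's config objects.
from typing import Optional


def sketch_max_configs_to_search(
    total_possible_configs: int,
    max_percentage: Optional[int] = None,  # user-set --max_percentage_of_search_space
    max_trials: Optional[int] = None,  # user-set --optuna_max_trials
) -> int:
    # Default percentage mirrors DEFAULT_OPTUNA_MAX_PERCENTAGE_OF_SEARCH_SPACE (10)
    percentage = max_percentage if max_percentage is not None else 10
    trials_from_percentage = int(total_possible_configs * percentage / 100)

    if max_trials is not None and max_percentage is not None:
        # Both flags set by the user: use the smaller of the two caps
        return min(max_trials, trials_from_percentage)
    if max_trials is not None:
        # Only a trial count set: it overrides the default percentage
        return max_trials
    # Default: cap based on percentage of the total search space
    return trials_from_percentage


# Mirrors the three new unit tests (120-config search space):
assert sketch_max_configs_to_search(120) == 12  # 10% default
assert sketch_max_configs_to_search(120, max_trials=6) == 6  # count only
assert sketch_max_configs_to_search(120, max_percentage=3, max_trials=6) == 3  # smaller wins

The net effect of the change: the percentage-based cap remains the default behavior, an explicit --optuna_max_trials replaces it when given, and when both flags are supplied the tighter of the two limits bounds the Optuna search.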